| code (string, length 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, length 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Number of integration steps between x0 and x_end.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Forward (explicit) Euler update: y_{k+1} = y_k + h * f(x_k, y_k).
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 |
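The sample above is the explicit (forward) Euler method. A minimal usage sketch, assuming the repaired `explicit_euler` name from the fix above; the test problem dy/dx = y is illustrative:

```python
import numpy as np

# Integrate dy/dx = y on [0, 1] with y(0) = 1; the exact value at x = 1 is e.
ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])            # ~2.70481; the error is first order and shrinks with step_size
print(float(np.exp(1)))  # 2.71828...
```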
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 148 | 0 |
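The whole check above hinges on one regex. A standalone sketch of what it extracts, using a hypothetical docstring fragment:

```python
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Hypothetical docstring fragment in the markdown-link style the script scans for.
docstring = "Instantiate it like [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    # The check passes only when the link is exactly the hub URL built from the name.
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"
    print(ckpt_name, ckpt_link)
```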
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE__ ( ) -> int:
lowercase__: str = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
lowercase__: Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert('''RGB''' )
return image
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: Tuple = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
lowercase__: Dict = dct.pop(__UpperCAmelCase )
lowercase__: Tuple = val
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__: str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowercase__: Tuple = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowercase__: List[Any] = torch.cat((q_bias, torch.zeros_like(__UpperCAmelCase , requires_grad=__UpperCAmelCase ), v_bias) )
lowercase__: Any = qkv_bias
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: List[str] = 3_6_4 if '''coco''' in model_name else 2_2_4
lowercase__: Optional[Any] = InstructBlipVisionConfig(image_size=__UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase__: List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__: List[Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase__: Union[str, Any] = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase__: Any = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase__: int = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
lowercase__: Tuple = InstructBlipConfig(vision_config=__UpperCAmelCase , text_config=__UpperCAmelCase , qformer_config=__UpperCAmelCase )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> Tuple:
lowercase__: Dict = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
lowercase__: Dict = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase__: Union[str, Any] = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
lowercase__, lowercase__: Dict = get_blipa_config(__UpperCAmelCase )
lowercase__: Union[str, Any] = InstructBlipForConditionalGeneration(__UpperCAmelCase ).eval()
lowercase__: int = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
lowercase__, lowercase__: Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowercase__: Dict = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
lowercase__: Union[str, Any] = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
lowercase__, lowercase__, lowercase__: Dict = load_model_and_preprocess(
name=__UpperCAmelCase , model_type=__UpperCAmelCase , is_eval=__UpperCAmelCase , device=__UpperCAmelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowercase__: Tuple = original_model.state_dict()
lowercase__: Dict = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__: Dict = state_dict.pop(__UpperCAmelCase )
if key.startswith('''Qformer.bert''' ):
lowercase__: Any = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowercase__: List[str] = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
lowercase__: int = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowercase__: Union[str, Any] = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
lowercase__: Optional[Any] = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
lowercase__: Any = key.replace('''t5''' , '''language''' )
lowercase__: int = val
# read in qv biases
read_in_q_v_bias(__UpperCAmelCase , __UpperCAmelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
lowercase__: Union[str, Any] = load_demo_image()
lowercase__: int = '''What is unusual about this image?'''
# create processor
lowercase__: Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
lowercase__: int = InstructBlipProcessor(
image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase , )
lowercase__: Optional[int] = processor(images=__UpperCAmelCase , text=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# make sure processor creates exact same pixel values
lowercase__: Any = vis_processors['''eval'''](__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
lowercase__: Union[str, Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __UpperCAmelCase )
original_model.to(__UpperCAmelCase )
hf_model.to(__UpperCAmelCase )
with torch.no_grad():
if "vicuna" in model_name:
lowercase__: Optional[Any] = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
lowercase__: str = hf_model(**__UpperCAmelCase ).logits
else:
lowercase__: List[str] = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
lowercase__: Optional[int] = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__UpperCAmelCase )
lowercase__: Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
lowercase__: str = hf_model(**__UpperCAmelCase , labels=__UpperCAmelCase ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase__: Optional[int] = 1e-4 if '''vicuna''' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __UpperCAmelCase , atol=__UpperCAmelCase )
print('''Looks ok!''' )
print('''Generating with original model...''' )
lowercase__: Optional[int] = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
lowercase__: Any = hf_model.generate(
**__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase__: List[Any] = 2
print('''Original generation:''' , __UpperCAmelCase )
lowercase__: Any = processor.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowercase__: List[str] = [text.strip() for text in output_text]
print('''HF generation:''' , __UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
__A = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__A = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
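The core move in the conversion script above is popping each tensor out of the original state dict and reinserting it under the Transformers key. A framework-free sketch of that pattern; the key names here are illustrative, not the full checkpoint mapping:

```python
import torch

def rename_key(state_dict, old, new):
    # Same pattern as the `rename_key` helper above: pop under the old name,
    # reinsert under the new one, so no tensor is copied.
    state_dict[new] = state_dict.pop(old)

state_dict = {"visual_encoder.cls_token": torch.zeros(1, 1, 8)}
rename_key(state_dict, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
print(list(state_dict))  # ['vision_model.embeddings.class_embedding']
```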
| 2 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 2 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowerCAmelCase : Optional[int] ='src/diffusers'
# Matches is_xxx_available()
__lowerCAmelCase : str =re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__lowerCAmelCase : Tuple =re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
__lowerCAmelCase : Union[str, Any] ='\n{0} = None\n'
__lowerCAmelCase : Optional[Any] ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
__lowerCAmelCase : List[Any] ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : str = _re_backend.findall(lowercase__ )
if len(lowercase__ ) == 0:
return None
return "_and_".join(lowercase__ )
def _UpperCamelCase ( ):
with open(os.path.join(lowercase__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
# Get to the point we do the actual imports for type checking
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
# Go through the end of the file
while line_index < len(lowercase__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__SCREAMING_SNAKE_CASE : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
__SCREAMING_SNAKE_CASE : str = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase__ ) and len(lines[line_index] ) > 1:
__SCREAMING_SNAKE_CASE : Tuple = lines[line_index]
__SCREAMING_SNAKE_CASE : Optional[Any] = _re_single_line_import.search(lowercase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if name.isupper():
return DUMMY_CONSTANT.format(lowercase__ )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase__ , lowercase__ )
else:
return DUMMY_CLASS.format(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__=None ):
if backend_specific_objects is None:
__SCREAMING_SNAKE_CASE : List[str] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__SCREAMING_SNAKE_CASE : List[str] = {}
for backend, objects in backend_specific_objects.items():
__SCREAMING_SNAKE_CASE : int = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
__SCREAMING_SNAKE_CASE : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase__ , lowercase__ ) for o in objects] )
__SCREAMING_SNAKE_CASE : int = dummy_file
return dummy_files
def _UpperCamelCase ( lowercase__=False ):
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__SCREAMING_SNAKE_CASE : Optional[int] = {'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase__ , '''utils''' )
__SCREAMING_SNAKE_CASE : str = {
backend: os.path.join(lowercase__ , F'''dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py''' )
for backend in dummy_files.keys()
}
__SCREAMING_SNAKE_CASE : Tuple = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase__ ):
with open(lowercase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.read()
else:
__SCREAMING_SNAKE_CASE : int = ''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase : Optional[int] =parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 9 |
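For reference, here is what two of the three templates above expand to. This sketch re-declares the constant and function templates with the indentation the dump lost; the object names and backend list are illustrative:

```python
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"

backends = '["torch"]'
print(DUMMY_CONSTANT.format("ONNX_WEIGHTS_NAME"))
print(DUMMY_FUNCTION.format("load_checkpoint", backends))
```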
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
__SCREAMING_SNAKE_CASE : int = metric.compute()
# Then do distributed
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : int = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE : Any = batch['''labels''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__SCREAMING_SNAKE_CASE : Tuple = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 1 |
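Why `gather_for_metrics` instead of a plain gather: when the dataset size is not divisible by the number of processes, the last batch is padded with duplicated samples, which would skew a metric. A back-of-the-envelope sketch of the counts (illustrative arithmetic, not Accelerate's exact padding scheme):

```python
world_size, num_samples = 3, 8
per_process = -(-num_samples // world_size)  # ceil division -> 3 samples each
gathered = world_size * per_process          # 9 predictions after a plain all-gather
truncated = min(gathered, num_samples)       # 8: gather_for_metrics drops the duplicates
print(per_process, gathered, truncated)      # 3 9 8
```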
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
__UpperCamelCase : List[Any] = """BlipImageProcessor"""
__UpperCamelCase : Union[str, Any] = """AutoTokenizer"""
def __init__( self : int , snake_case_ : str , snake_case_ : Optional[Any] ):
UpperCamelCase_: Optional[Any] = False
super().__init__(snake_case_ , snake_case_ )
UpperCamelCase_: Dict = self.image_processor
def __call__( self : Tuple , snake_case_ : ImageInput = None , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : List[Any] , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
UpperCamelCase_: int = self.tokenizer
UpperCamelCase_: Union[str, Any] = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
# add pixel_values
UpperCamelCase_: Dict = self.image_processor(snake_case_ , return_tensors=snake_case_ )
if text is not None:
UpperCamelCase_: Dict = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
else:
UpperCamelCase_: Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def lowerCAmelCase__ ( self : int , *snake_case_ : Dict , **snake_case_ : Optional[int] ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: str = self.tokenizer.model_input_names
UpperCamelCase_: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 223 |
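The processor above boils down to: run the tokenizer alone when there are no images, otherwise run the image processor and merge the tokenizer output into its batch. A minimal sketch with plain dicts standing in for `BatchEncoding` objects (hypothetical values):

```python
image_batch = {"pixel_values": [[0.0, 0.1]]}                          # from the image processor
text_batch = {"input_ids": [[101, 102]], "attention_mask": [[1, 1]]}  # from the tokenizer
image_batch.update(text_batch)  # the `encoding_image_processor.update(...)` step
print(sorted(image_batch))      # ['attention_mask', 'input_ids', 'pixel_values']
```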
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = """ctrl"""
__UpperCamelCase : Dict = ["""past_key_values"""]
__UpperCamelCase : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Dict , snake_case_ : Any=24_6534 , snake_case_ : Dict=256 , snake_case_ : str=1280 , snake_case_ : Optional[int]=8192 , snake_case_ : Union[str, Any]=48 , snake_case_ : Any=16 , snake_case_ : Optional[int]=0.1 , snake_case_ : Any=0.1 , snake_case_ : Any=1e-6 , snake_case_ : Optional[Any]=0.02 , snake_case_ : Optional[int]=True , **snake_case_ : Union[str, Any] , ):
UpperCamelCase_: Union[str, Any] = vocab_size
UpperCamelCase_: Union[str, Any] = n_positions
UpperCamelCase_: Optional[int] = n_embd
UpperCamelCase_: int = n_layer
UpperCamelCase_: str = n_head
UpperCamelCase_: Optional[int] = dff
UpperCamelCase_: Optional[Any] = resid_pdrop
UpperCamelCase_: Union[str, Any] = embd_pdrop
UpperCamelCase_: List[str] = layer_norm_epsilon
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: Optional[Any] = use_cache
super().__init__(**snake_case_ )
| 223 | 1 |
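The attribute map in the config above lets generic names like `hidden_size` resolve to model-specific ones like `n_embd`. A self-contained sketch of that redirection, not the actual `PretrainedConfig` implementation:

```python
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only called when normal lookup fails, so aliases never shadow real attributes.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_hidden_layers)  # 1280 48
```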
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : Any , __lowercase : List[Any]=13 , __lowercase : Union[str, Any]=7 , __lowercase : Any=True , __lowercase : Optional[int]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : List[Any]=99 , __lowercase : List[Any]=64 , __lowercase : Optional[Any]=32 , __lowercase : Optional[int]=5 , __lowercase : str=4 , __lowercase : List[Any]=37 , __lowercase : Union[str, Any]="gelu" , __lowercase : int=0.1 , __lowercase : Tuple=0.1 , __lowercase : str=512 , __lowercase : List[Any]=16 , __lowercase : Union[str, Any]=2 , __lowercase : int=0.02 , __lowercase : Any=3 , __lowercase : Optional[Any]=4 , __lowercase : Any=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = embedding_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : str , __lowercase : Any , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Any ):
'''simple docstring'''
__a = MobileBertModel(config=_A )
model.to(_A )
model.eval()
__a = model(_A , attention_mask=_A , token_type_ids=_A )
__a = model(_A , token_type_ids=_A )
__a = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Any , __lowercase : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = MobileBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
__a = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : int , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[Any] ):
'''simple docstring'''
__a = MobileBertForNextSentencePrediction(config=_A )
model.to(_A )
model.eval()
__a = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any] ):
'''simple docstring'''
__a = MobileBertForPreTraining(config=_A )
model.to(_A )
model.eval()
__a = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , next_sentence_label=_A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : int , __lowercase : int , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Any ):
'''simple docstring'''
__a = MobileBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__a = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : int , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Any ):
'''simple docstring'''
__a = self.num_labels
__a = MobileBertForSequenceClassification(_A )
model.to(_A )
model.eval()
__a = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[str] , __lowercase : str , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Optional[int] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
__a = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Dict , __lowercase : str , __lowercase : List[Any] ):
'''simple docstring'''
__a = self.num_choices
__a = MobileBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
__lowerCamelCase : Any =(
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCamelCase : List[Any] =(
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[Any] =True
def UpperCamelCase_ ( self : str , __lowercase : List[str] , __lowercase : str , __lowercase : Any=False ):
'''simple docstring'''
__a = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class in get_values(_A ):
__a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A )
__a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileBertModelTester(self )
__a = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_A )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_A )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_A )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_A )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_A )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_A )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
return torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
lowerCamelCase__ = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(_A )
__a = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
__a = model(_A )[0]
__a = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _A )
__a = torch.tensor(
[
[
[-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
[-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
[2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
]
] , device=_A , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 302 |
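The integration test above avoids a plain `allclose` because MobileBERT activations span roughly 1e0 to 1e8, so a fixed absolute tolerance is meaningless. A small sketch of the ratio-based bound it uses instead:

```python
import torch

TOLERANCE = 1e-3

expected = torch.tensor([1.0e8, 2.5, -3.0e4])
actual = expected * (1 + 2e-4)  # simulate a tiny relative error
ratio = expected / actual
ok = ((ratio >= 1 - TOLERANCE) & (ratio <= 1 + TOLERANCE)).all()
print(bool(ok))  # True: every element is within 0.1% relative error
```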
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''mra'''
def __init__( self : str , _A : List[str]=5_0265 , _A : int=768 , _A : Union[str, Any]=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : Any="gelu" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=512 , _A : Tuple=1 , _A : List[str]=0.02 , _A : Union[str, Any]=1e-5 , _A : Optional[int]="absolute" , _A : Union[str, Any]=4 , _A : List[Any]="full" , _A : Union[str, Any]=0 , _A : Union[str, Any]=0 , _A : Optional[Any]=1 , _A : Union[str, Any]=0 , _A : Any=2 , **_A : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = initializer_range
__SCREAMING_SNAKE_CASE : Any = type_vocab_size
__SCREAMING_SNAKE_CASE : str = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : str = block_per_row
__SCREAMING_SNAKE_CASE : Union[str, Any] = approx_mode
__SCREAMING_SNAKE_CASE : Optional[int] = initial_prior_first_n_blocks
__SCREAMING_SNAKE_CASE : List[Any] = initial_prior_diagonal_n_blocks
| 303 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = CTRLTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
def __a ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_a )
def __a ( self , _a ) -> List[Any]:
lowerCAmelCase_ = "adapt react readapt apt"
lowerCAmelCase_ = "adapt react readapt apt"
return input_text, output_text
def __a ( self ) -> str:
lowerCAmelCase_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "adapt react readapt apt"
lowerCAmelCase_ = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCAmelCase_ = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
| 22 |
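The round trip the tokenizer test above asserts can be reproduced with the same toy vocab: subword pieces carry a `@@` continuation marker, and anything out of vocabulary maps to `<unk>`. A sketch of the token-to-id step only (the BPE merge step itself is omitted):

```python
vocab = {tok: i for i, tok in enumerate(["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"])}
tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
ids = [vocab.get(tok, vocab["<unk>"]) for tok in tokens]
print(ids)  # [0, 1, 2, 4, 5, 1, 0, 3, 6], matching the expected ids in the test
```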
import datasets
lowerCamelCase__ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowerCamelCase__ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowerCamelCase__ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def A(__a: Dict , __a: Union[str, Any] ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __a ( self , _a , _a ) -> List[str]:
return {"accuracy": simple_accuracy(_a , _a )}
| 22 | 1 |
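The XNLI metric above is just elementwise accuracy, the mean of a boolean comparison. A two-line sketch:

```python
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75
```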
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : Any = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["LayoutLMv2FeatureExtractor"]
lowercase : Optional[int] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
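The `_LazyModule` pattern above defers the heavy submodule imports until an attribute is first touched. A minimal stand-in using PEP 562's module-level `__getattr__` (illustrative, not the actual `_LazyModule` implementation); save it as, say, `lazy_demo.py` and access `lazy_demo.dumps` from another module:

```python
import importlib

# Map each public attribute to the module that actually defines it.
_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Runs only for attributes not found normally, and only on first access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```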
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__A : Any = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
__A : Any = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key: str, file: str) -> str:
    """Convert Megatron-DeepSpeed TP/PP weight names to transformers PP-only names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the block index is encoded in the shard file name
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype) -> float:
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
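# Illustrative sanity checks (hypothetical Megatron-DeepSpeed shard names):
#   layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
#       -> "h.1.input_layernorm.weight"  # shard index 4 minus the 3 non-block prefix files
#   get_dtype_size(torch.float16) -> 2      # 16 bits // 8
#   get_dtype_size(torch.bool)    -> 0.125  # 1 bit / 8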
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 120 | 0 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean algorithm, recursive variant."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Euclidean algorithm, iterative variant."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
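# Quick worked examples (both variants should agree):
#   greatest_common_divisor(24, 40) == 8
#   gcd_by_iterative(24, 40) == 8
#   greatest_common_divisor(0, 7) == 7  # gcd(0, n) is n by convention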
def main():
    """Call both GCD implementations on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 76 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
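# Context for the tests below: transpose/reshape/squeeze/expand_dims from
# transformers.utils dispatch on the input type, so the same call works for
# numpy, torch, tf and jax inputs alike; e.g. (numpy sketch)
# transpose(np.ones((2, 3))) has shape (3, 2), exactly like np.ones((2, 3)).transpose().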
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 76 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    """Map original LAVIS weight names to their transformers equivalents."""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
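# Illustration (hypothetical call; fetches the referenced configs from the Hub):
#   config, image_size = get_blip2_config("instructblip-flan-t5-xl")
#   assert image_size == 224  # non-COCO variants use the 224px vision tower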
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original InstructBLIP weights to the transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
args = parser.parse_args()

convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 2 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit from num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
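# Worked example (illustrative): remove_digit(6385) compares 385, 685, 635 and 638
# and returns 685; negatives go through abs(), so remove_digit(-11) -> 1.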
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 364 |
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 329 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (RBJ cookbook formulas)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
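# Minimal usage sketch (assumes audio_filters.iir_filter.IIRFilter exposes a
# per-sample `process(sample: float) -> float` method, as in this repository):
#   fs = 48_000
#   lowpass = make_lowpass(1_000, fs)
#   filtered = [lowpass.process(sample) for sample in samples]  # `samples`: hypothetical float iterable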
| 223 |
'''simple docstring'''
import math
def sieve(n: int) -> list:
    """Segmented sieve: return all primes <= n using O(sqrt(n)) memory per segment."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
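# Sanity check (illustrative): sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]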
print(sieve(10**6))
| 223 | 1 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file: str):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
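# Note (illustrative): the mapping built in `stretch` is the classic histogram
# equalization rule s_k = round((L - 1) * CDF(r_k)); e.g. with L = 256, a gray
# level whose cumulative probability is 0.5 maps to round(255 * 0.5) = 128.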
| 35 |
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 35 | 1 |
'''simple docstring'''
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
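# Note (hedged): UNetBlockTesterMixin.test_output builds the block from
# prepare_init_args_and_inputs_for_common(), runs a forward pass on dummy_input,
# and compares a fixed slice of the output tensor against the expected values above.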
| 22 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : int ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : DatasetInfo ) -> Any:
'''simple docstring'''
_UpperCAmelCase = str(__lowercase )
dataset_info.write_to_directory(__lowercase )
_UpperCAmelCase = DatasetInfo.from_directory(__lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__lowercase , "dataset_info.json" ) )
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_UpperCAmelCase = yaml.safe_dump(__lowercase )
_UpperCAmelCase = yaml.safe_load(__lowercase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo()
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : DatasetInfosDict ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = str(__lowercase )
dataset_infos_dict.write_to_directory(__lowercase )
_UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase )
# the config_name key of the dataset_infos_dict takes precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
_UpperCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowercase , "README.md" ) )
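# A self-contained sketch of the YAML round-trip the tests above depend on:
# yaml.safe_dump followed by yaml.safe_load is lossless for plain dicts, lists,
# ints and strings. (Illustrative check; not part of the original test module.)
_example_yaml_dict = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert yaml.safe_load(yaml.safe_dump(_example_yaml_dict)) == _example_yaml_dict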
| 22 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''blip_2_vision_model'''
def __init__( self : Optional[int] , lowercase_ : Union[str, Any]=1408 , lowercase_ : List[str]=6144 , lowercase_ : Union[str, Any]=39 , lowercase_ : List[str]=16 , lowercase_ : Optional[Any]=224 , lowercase_ : int=14 , lowercase_ : str="gelu" , lowercase_ : int=0.0_00_01 , lowercase_ : List[Any]=0.0 , lowercase_ : int=1E-10 , lowercase_ : int=True , **lowercase_ : Tuple , ):
super().__init__(**lowercase_ )
lowercase_ : Any = hidden_size
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Optional[Any] = patch_size
lowercase_ : Tuple = image_size
lowercase_ : List[Any] = initializer_range
lowercase_ : Any = attention_dropout
lowercase_ : str = layer_norm_eps
lowercase_ : List[Any] = hidden_act
lowercase_ : Optional[Any] = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int ):
cls._set_token_in_kwargs(lowercase_ )
lowercase_ , lowercase_ : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
lowercase_ : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''blip_2_qformer'''
def __init__( self : Optional[Any] , lowercase_ : Any=30522 , lowercase_ : Union[str, Any]=768 , lowercase_ : Any=12 , lowercase_ : str=12 , lowercase_ : List[str]=3072 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : int=0.1 , lowercase_ : List[str]=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int=2 , lowercase_ : Any=1408 , **lowercase_ : Any , ):
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
lowercase_ : Dict = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : List[str] = hidden_act
lowercase_ : Any = intermediate_size
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Dict = max_position_embeddings
lowercase_ : Tuple = initializer_range
lowercase_ : str = layer_norm_eps
lowercase_ : int = position_embedding_type
lowercase_ : List[str] = cross_attention_frequency
lowercase_ : int = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Union[str, Any] ):
cls._set_token_in_kwargs(lowercase_ )
lowercase_ , lowercase_ : Dict = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
lowercase_ : Union[str, Any] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''blip-2'''
UpperCamelCase__ = True
def __init__( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : Dict=None , lowercase_ : Any=32 , **lowercase_ : Any ):
super().__init__(**lowercase_ )
if vision_config is None:
lowercase_ : Any = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
lowercase_ : str = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
lowercase_ : int = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
lowercase_ : List[Any] = BlipaVisionConfig(**lowercase_ )
lowercase_ : List[str] = BlipaQFormerConfig(**lowercase_ )
lowercase_ : int = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
lowercase_ : Tuple = CONFIG_MAPPING[text_model_type](**lowercase_ )
lowercase_ : Dict = self.text_config.tie_word_embeddings
lowercase_ : Tuple = self.text_config.is_encoder_decoder
lowercase_ : Any = num_query_tokens
lowercase_ : Tuple = self.vision_config.hidden_size
lowercase_ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase_ : Optional[int] = 1.0
lowercase_ : Dict = 0.02
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[str] , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = copy.deepcopy(self.__dict__ )
lowercase_ : List[str] = self.vision_config.to_dict()
lowercase_ : Optional[Any] = self.qformer_config.to_dict()
lowercase_ : List[Any] = self.text_config.to_dict()
lowercase_ : List[str] = self.__class__.model_type
return output
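# For orientation: composite configs like the one above are usually built either
# from plain sub-config dicts passed to __init__ or from sub-config objects via
# the classmethod further up. A hedged, comment-only sketch (the constructor
# name follows this file's obfuscated spelling and is an assumption):
#   config = __magic_name__(vision_config={}, qformer_config={}, text_config={"model_type": "opt"})
#   nested = config.to_dict()   # one nested dict per sub-config plus model_type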
| 21 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : str , *lowercase_ : int , **lowercase_ : Any ):
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , """decord""" )
self.check_model_type(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None ):
lowercase_ : Union[str, Any] = {}
if frame_sampling_rate is not None:
lowercase_ : Any = frame_sampling_rate
if num_frames is not None:
lowercase_ : Optional[Any] = num_frames
lowercase_ : Union[str, Any] = {}
if top_k is not None:
lowercase_ : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , lowercase_ : Union[str, List[str]] , **lowercase_ : str ):
return super().__call__(lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : Optional[int]=1 ):
if num_frames is None:
lowercase_ : List[Any] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowercase_ : Union[str, Any] = BytesIO(requests.get(lowercase_ ).content )
lowercase_ : Optional[Any] = VideoReader(lowercase_ )
videoreader.seek(0 )
lowercase_ : Tuple = 0
lowercase_ : List[Any] = num_frames * frame_sampling_rate - 1
lowercase_ : Optional[int] = np.linspace(lowercase_ , lowercase_ , num=lowercase_ , dtype=np.intaa )
lowercase_ : Optional[int] = videoreader.get_batch(lowercase_ ).asnumpy()
lowercase_ : Union[str, Any] = list(lowercase_ )
lowercase_ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str ):
lowercase_ : int = self.model(**lowercase_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=5 ):
if top_k > self.model.config.num_labels:
lowercase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowercase_ : str = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ : Optional[Any] = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ : Union[str, Any] = scores.tolist()
lowercase_ : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 21 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Any = r"\w+[.]\d+"
SCREAMING_SNAKE_CASE : List[str] = re.findall(_a , _a)
for pat in pats:
SCREAMING_SNAKE_CASE : List[str] = key.replace(_a , "_".join(pat.split(".")))
return key
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key)
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
SCREAMING_SNAKE_CASE : List[Any] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : int = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : List[Any] = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
SCREAMING_SNAKE_CASE : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0)
return renamed_pt_tuple_key, pt_tensor
# linear layer
SCREAMING_SNAKE_CASE : str = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
SCREAMING_SNAKE_CASE : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
SCREAMING_SNAKE_CASE : Tuple = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
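# Minimal, self-contained sketch of the conv-kernel case handled above: PyTorch
# stores conv weights as (out, in, kh, kw) while Flax expects (kh, kw, in, out),
# hence the transpose(2, 3, 1, 0). The shapes here are illustrative assumptions.
import numpy as np
_pt_kernel = np.zeros((8, 3, 5, 5))              # (out_channels, in_channels, kh, kw)
_flax_kernel = _pt_kernel.transpose(2, 3, 1, 0)  # -> (kh, kw, in_channels, out_channels)
assert _flax_kernel.shape == (5, 5, 3, 8)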
def lowerCamelCase__ ( _a , _a , _a=42):
# Step 1: Convert pytorch tensor to numpy
SCREAMING_SNAKE_CASE : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
SCREAMING_SNAKE_CASE : str = flax_model.init_weights(PRNGKey(_a))
SCREAMING_SNAKE_CASE : str = flatten_dict(_a)
SCREAMING_SNAKE_CASE : Any = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE : Tuple = rename_key(_a)
SCREAMING_SNAKE_CASE : Any = tuple(renamed_pt_key.split("."))
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = rename_key_and_reshape_tensor(_a , _a , _a)
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(_a)
return unflatten_dict(_a)
| 76 |
import baseaa
def lowerCamelCase__ ( _a):
return baseaa.aaaencode(_a.encode("utf-8"))
def lowerCamelCase__ ( _a):
return baseaa.aaadecode(_a).decode("utf-8")
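# Round-trip sketch for the two helpers above, assuming the obfuscated
# baseaa.aaaencode / baseaa.aaadecode stand for base64.b85encode / b85decode
# (an assumption inferred from the utf-8 encode/decode pattern):
import base64
assert base64.b85decode(base64.b85encode(b"hello world")) == b"hello world"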
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_UpperCAmelCase : List[str] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_UpperCAmelCase : List[Any] = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = SavedModel()
__lowerCAmelCase = []
with open(os.path.join(lowerCamelCase, '''utils''', '''tf_ops''', '''onnx.json''')) as f:
__lowerCAmelCase = json.load(lowerCamelCase)['''opsets''']
for i in range(1, opset + 1):
onnx_ops.extend(onnx_opsets[str(lowerCamelCase)])
with open(lowerCamelCase, '''rb''') as f:
saved_model.ParseFromString(f.read())
__lowerCAmelCase = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node)
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def)
# Convert to list, sorted if you want
__lowerCAmelCase = sorted(lowerCamelCase)
__lowerCAmelCase = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowerCamelCase)
if strict and len(lowerCamelCase) > 0:
raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops)
elif len(lowerCamelCase) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""")
print(*lowerCamelCase, sep='''\n''')
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
_UpperCAmelCase : List[Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 9 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = 'roberta'
def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class a__ ( __A ):
"""simple docstring"""
@property
def _snake_case (self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
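# For orientation: the mapping above tells the ONNX exporter which input axes
# are dynamic. A tiny self-contained check of the default (non multiple-choice)
# layout; the names mirror the property above:
_dynamic_axis = {0: "batch", 1: "sequence"}
_inputs = OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
assert list(_inputs) == ["input_ids", "attention_mask"]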
| 9 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def a_ ( _lowerCAmelCase ) -> List[Tuple[int, ...]]:
__lowerCamelCase : int = []
if isinstance(a__ ,a__ ):
for v in tree.values():
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(a__ ) )
elif isinstance(a__ ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Tuple[int, ...]:
__lowerCamelCase : Any = []
for d in reversed(a__ ):
idx.append(flat_idx % d )
__lowerCamelCase : Dict = flat_idx // d
return tuple(reversed(a__ ) )
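# Worked example for the helper above: unraveling flat index 5 over dims (2, 3)
# walks the dims in reverse (5 % 3 = 2, then 5 // 3 = 1) and yields (1, 2),
# matching numpy's unravel_index(5, (2, 3)).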
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,) -> List[Tuple[slice, ...]]:
def reduce_edge_list(_lowerCAmelCase ) -> None:
__lowerCamelCase : Optional[int] = True
for i in range(len(a__ ) ):
__lowerCamelCase : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
__lowerCamelCase : Optional[int] = l[reversed_idx]
if start_edges is None:
__lowerCamelCase : List[Any] = [s == 0 for s in start]
reduce_edge_list(a__ )
if end_edges is None:
__lowerCamelCase : Optional[int] = [e == (d - 1) for e, d in zip(a__ ,a__ )]
reduce_edge_list(a__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(a__ ) == 0:
return [()]
elif len(a__ ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[Any] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(a__ ,a__ ):
if s == e:
path_list.append(slice(a__ ,s + 1 ) )
else:
break
__lowerCamelCase : Optional[Any] = tuple(a__ )
__lowerCamelCase : Optional[int] = len(a__ )
# start == end, and we're done
if divergence_idx == len(a__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCamelCase : List[str] = start[divergence_idx]
return tuple(
path + (slice(a__ ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowerCamelCase : List[Any] = end[divergence_idx]
return tuple(
path + (slice(a__ ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__lowerCamelCase : Tuple = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> torch.Tensor:
__lowerCamelCase : Union[str, Any] = t.shape[:no_batch_dims]
__lowerCamelCase : int = list(_flat_idx_to_idx(a__ ,a__ ) )
# _get_minimal_slice_set is inclusive
__lowerCamelCase : Dict = list(_flat_idx_to_idx(flat_end - 1 ,a__ ) )
# Get an ordered list of slices to perform
__lowerCamelCase : Union[str, Any] = _get_minimal_slice_set(
a__ ,a__ ,a__ ,)
__lowerCamelCase : Dict = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,) -> Any:
if not (len(a__ ) > 0):
raise ValueError('Must provide at least one input' )
__lowerCamelCase : int = [shape[:no_batch_dims] for shape in _fetch_dims(a__ )]
__lowerCamelCase : int = tuple([max(a__ ) for s in zip(*a__ )] )
def _prep_inputs(_lowerCAmelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__lowerCamelCase : List[str] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__lowerCamelCase : str = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
__lowerCamelCase : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__lowerCamelCase : Any = tensor_tree_map(_prep_inputs ,a__ )
__lowerCamelCase : List[Any] = None
if _out is not None:
__lowerCamelCase : List[Any] = tensor_tree_map(lambda _lowerCAmelCase : _lowerCAmelCase.view([-1] + list(_lowerCAmelCase.shape[no_batch_dims:] ) ) ,_out )
__lowerCamelCase : Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__lowerCamelCase : str = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_lowerCAmelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = prepped_outputs
for _ in range(a__ ):
# Chunk the input
if not low_mem:
__lowerCamelCase : Dict = _select_chunk
else:
__lowerCamelCase : Dict = partial(
_chunk_slice ,flat_start=a__ ,flat_end=min(a__ ,i + chunk_size ) ,no_batch_dims=len(a__ ) ,)
__lowerCamelCase : Union[str, Any] = tensor_tree_map(a__ ,a__ )
# Run the layer on the chunk
__lowerCamelCase : Optional[Any] = layer(**a__ )
# Allocate space for the output
if out is None:
__lowerCamelCase : Union[str, Any] = tensor_tree_map(lambda _lowerCAmelCase : _lowerCAmelCase.new_zeros((flat_batch_dim,) + _lowerCAmelCase.shape[1:] ) ,a__ )
# Put the chunk in its pre-allocated space
if isinstance(a__ ,a__ ):
def assign(_lowerCAmelCase ,_lowerCAmelCase ) -> None:
for k, v in da.items():
if isinstance(a__ ,a__ ):
assign(a__ ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__lowerCamelCase : Union[str, Any] = da[k]
assign(a__ ,a__ )
elif isinstance(a__ ,a__ ):
for xa, xa in zip(a__ ,a__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__lowerCamelCase : Dict = xa
elif isinstance(a__ ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__lowerCamelCase : Tuple = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
__lowerCamelCase : List[Any] = tensor_tree_map(lambda _lowerCAmelCase : _lowerCAmelCase.view(orig_batch_dims + _lowerCAmelCase.shape[1:] ) ,a__ )
return out
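# A hedged, self-contained sketch of the chunking idea implemented above: run a
# layer over flat batch slices of size chunk_size and stitch the pieces back
# together. The toy layer and shapes below are illustrative assumptions.
def _toy_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2
_x = torch.arange(6.0).reshape(6, 1)
_chunked = torch.cat([_toy_layer(_x[i : i + 2]) for i in range(0, 6, 2)])
assert torch.equal(_chunked, _toy_layer(_x))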
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _a : Dict = 512 , ) -> Union[str, Any]:
__lowerCamelCase : str = max_chunk_size
__lowerCamelCase : List[Any] = None
__lowerCamelCase : Optional[Any] = None
def _lowercase ( self : Optional[int] , _a : List[str] , _a : Dict , _a : List[Any] ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__lowerCamelCase : Tuple = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
__lowerCamelCase : int = [c for c in candidates if c > min_chunk_size]
__lowerCamelCase : List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_a : Dict ) -> bool:
try:
with torch.no_grad():
fn(*_SCREAMING_SNAKE_CASE , chunk_size=_SCREAMING_SNAKE_CASE )
return True
except RuntimeError:
return False
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE ) - 1
while i > min_viable_chunk_size_index:
__lowerCamelCase : str = test_chunk_size(candidates[i] )
if not viable:
__lowerCamelCase : Tuple = (min_viable_chunk_size_index + i) // 2
else:
__lowerCamelCase : Tuple = i
__lowerCamelCase : Optional[Any] = (i + len(_SCREAMING_SNAKE_CASE ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase ( self : Dict , _a : Tuple , _a : Dict ) -> bool:
__lowerCamelCase : Optional[int] = True
for aa, aa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert type(_SCREAMING_SNAKE_CASE ) == type(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Tuple = [v for _, v in sorted(aa.items() , key=lambda _a : _a[0] )]
__lowerCamelCase : List[Any] = [v for _, v in sorted(aa.items() , key=lambda _a : _a[0] )]
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
consistent &= aa == aa
return consistent
def _lowercase ( self : int , _a : Tuple , _a : List[str] , _a : str , ) -> int:
__lowerCamelCase : int = True
__lowerCamelCase : List[Any] = tree_map(lambda _a : _a.shape if isinstance(_a , torch.Tensor ) else _a , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = self._compare_arg_caches(self.cached_arg_data , _SCREAMING_SNAKE_CASE )
else:
# Otherwise, we can reuse the precomputed value
__lowerCamelCase : Any = False
if not consistent:
__lowerCamelCase : Dict = self._determine_favorable_chunk_size(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
__lowerCamelCase : Optional[int] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
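# For context: the tuner above probes power-of-two chunk sizes and keeps the
# largest candidate whose trial run does not raise (typically an out-of-memory
# RuntimeError). With max_chunk_size=512 and min_chunk_size=16 the candidate
# list works out to [16, 32, 64, 128, 256, 516]; the final entry gets a small
# +4 of headroom before the binary search over viability starts.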
| 208 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = original_name.split('.' )[0]
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] )
_UpperCAmelCase = orig_block_num - offset
_UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def lowerCAmelCase__ ( a__: Tuple ) -> int:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
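# Comment-only sketch of the block renumbering performed by the rename helpers
# above: a timm key such as "network.2.3.mlp.fc1.weight" becomes
# "poolformer.encoder.block.1.3.output.conv1.weight" once the patch-embedding
# offset (here assumed to be 1) is subtracted from the original block number.
# (Illustrative strings, not taken from a real checkpoint.)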
| 329 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _a :
'''simple docstring'''
A : Union[str, Any] = 42
# setable values
A : Optional[Any] = 42
A : List[str] = 42
A : int = None
@classmethod
def UpperCamelCase_ ( cls, A, A, A ):
'''simple docstring'''
return cls(common=_a, init_noise_sigma=_a, timesteps=_a )
@dataclass
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[Any] = 42
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Union[str, Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
A : str = 42
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self, A = 1_000, A = 0.00_01, A = 0.02, A = "linear", A = None, A = "fixed_small", A = True, A = "epsilon", A = jnp.floataa, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dtype
def UpperCamelCase_ ( self, A = None ):
'''simple docstring'''
if common is None:
SCREAMING_SNAKE_CASE : List[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : List[str] = jnp.array(1.0, dtype=self.dtype )
SCREAMING_SNAKE_CASE : List[Any] = jnp.arange(0, self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a, init_noise_sigma=_a, timesteps=_a, )
def UpperCamelCase_ ( self, A, A, A = None ):
'''simple docstring'''
return sample
def UpperCamelCase_ ( self, A, A, A = () ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE : Optional[Any] = (jnp.arange(0, _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a, timesteps=_a, )
def UpperCamelCase_ ( self, A, A, A=None, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE : List[Any] = jnp.clip(_a, a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE : Tuple = jnp.log(jnp.clip(_a, a_min=1E-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE : str = variance
SCREAMING_SNAKE_CASE : Optional[Any] = state.common.betas[t]
SCREAMING_SNAKE_CASE : str = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self, A, A, A, A, A = None, A = True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = timestep
if key is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.split(_a, sample.shape[1], axis=1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE : Tuple = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE : Tuple = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : Any = jnp.clip(_a, -1, 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE : Tuple = jax.random.split(_a, num=1 )
SCREAMING_SNAKE_CASE : Optional[int] = jax.random.normal(_a, shape=model_output.shape, dtype=self.dtype )
return (self._get_variance(_a, _a, predicted_variance=_a ) ** 0.5) * noise
SCREAMING_SNAKE_CASE : List[Any] = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype ) )
SCREAMING_SNAKE_CASE : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a, state=_a )
def UpperCamelCase_ ( self, A, A, A, A, ):
'''simple docstring'''
return add_noise_common(state.common, _a, _a, _a )
def UpperCamelCase_ ( self, A, A, A, A, ):
'''simple docstring'''
return get_velocity_common(state.common, _a, _a, _a )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
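# For reference, the "fixed_small" posterior variance computed in _get_variance
# above follows Eq. (7) of the DDPM paper:
#   var_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
# Tiny numeric sketch with illustrative (not real-schedule) values:
#   beta_t = 0.02; abar_t, abar_prev = 0.90, 0.92
#   var_t = (1 - 0.92) / (1 - 0.90) * 0.02   # = 0.016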
| 369 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
UpperCamelCase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Any = ['''input_ids''', '''attention_mask''']
A : Dict = NllbTokenizer
A : List[int] = []
A : List[int] = []
def __init__( self, A=None, A=None, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=None, A=None, A=None, A=False, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token
SCREAMING_SNAKE_CASE : Tuple = legacy_behaviour
super().__init__(
vocab_file=A, tokenizer_file=A, bos_token=A, eos_token=A, sep_token=A, cls_token=A, unk_token=A, pad_token=A, mask_token=A, src_lang=A, tgt_lang=A, additional_special_tokens=A, legacy_behaviour=A, **A, )
SCREAMING_SNAKE_CASE : Any = vocab_file
SCREAMING_SNAKE_CASE : Optional[int] = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
SCREAMING_SNAKE_CASE : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE : Dict = src_lang if src_lang is not None else 'eng_Latn'
SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self, A, A, A, A, **A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE : Union[str, Any] = src_lang
SCREAMING_SNAKE_CASE : List[Any] = self(A, add_special_tokens=A, return_tensors=A, **A )
SCREAMING_SNAKE_CASE : Any = self.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : int = tgt_lang_id
return inputs
def UpperCamelCase_ ( self, A, A = "eng_Latn", A = None, A = "fra_Latn", **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = src_lang
SCREAMING_SNAKE_CASE : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(A, A, **A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE : Tuple = [self.cur_lang_code]
SCREAMING_SNAKE_CASE : List[Any] = [self.eos_token_id]
SCREAMING_SNAKE_CASE : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE : int = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE : Dict = [self.cur_lang_code]
SCREAMING_SNAKE_CASE : str = [self.eos_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
SCREAMING_SNAKE_CASE : int = os.path.join(
A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file, A )
return (out_vocab_file,)
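# For orientation: with legacy_behaviour the language code is appended after EOS
# as a suffix token, otherwise it leads the sequence as a prefix. Sketch of the
# two single-sequence layouts produced by the special-token setters above:
#   legacy:      A </s> <lang_code>
#   non-legacy:  <lang_code> A </s>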
| 246 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
snake_case__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : Tuple = """"""
else:
snake_case__ : Dict = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size]
snake_case__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Tuple = in_proj_bias[-config.hidden_size :]
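# Self-contained sketch of the fused-QKV split performed above: timm stores the
# query, key and value projections stacked along dim 0 of one
# (3 * hidden, hidden) matrix, which is cut into three equal slices.
_hidden = 4
_qkv = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)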
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : str = dct.pop(_lowerCAmelCase )
snake_case__ : Tuple = val
def __snake_case( ) -> Tuple:
snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
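# Editorial note (not part of the original script): the same conversion can be
# driven programmatically; a minimal sketch, assuming timm can download the
# named checkpoint and the output directory is writable:
def _example_convert_deit():
    convert_deit_checkpoint(
        deit_name="vit_deit_base_distilled_patch16_224",
        pytorch_dump_folder_path="./deit-base-distilled-patch16-224",
    )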
| 35 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
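# Editorial example (added by the editor, not part of the test file above): the
# end-to-end inference pattern the integration test exercises, condensed into a
# helper. It assumes TensorFlow is available and uses the public
# "microsoft/resnet-50" checkpoint purely for illustration.
def _example_tf_resnet_predict(image):
    from transformers import AutoImageProcessor, TFResNetForImageClassification
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_label]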
| 35 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
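# Editorial example (not part of the original test file): what `move_added_token`
# buys you, as the test above checks -- newly added tokens normally land at the
# end of the vocab, and TransfoXL lets you relocate one to a chosen id without
# duplicating it. A minimal sketch reusing a tokenizer built as above:
def _example_move_added_token(tokenizer):
    tokenizer.add_tokens(["new1"])         # appended at the end of the vocab
    tokenizer.move_added_token("new1", 1)  # relocated to id 1
    return tokenizer.encode("new1")        # -> [1]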
| 366 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 257 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50

@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
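# Editorial example (not part of the original tests): the helper at the top of
# this file relies on Spark's `SPARK_PARTITION_ID()` pseudo-column to fetch one
# partition at a time, which is also a handy pattern on its own. A minimal sketch:
def _example_collect_partition(df, part_id):
    """Collect the rows of a single partition of a pyspark DataFrame."""
    return df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()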
| 21 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
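# Editorial example (not part of the original module): round-tripping the config
# through `to_dict`/`from_dict`, which exercises the nested backbone-config
# serialization implemented above. A minimal sketch:
def _example_config_roundtrip():
    config = DeformableDetrConfig()
    as_dict = config.to_dict()
    assert as_dict["model_type"] == "deformable_detr"
    return DeformableDetrConfig.from_dict(as_dict)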
| 21 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse through dotted names (e.g. "encoder.layer.0.weight") down to the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model

def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)

def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
| 222 |
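# Editorial example (not part of the module above): in practice these helpers are
# driven by `from_pretrained` rather than called directly. A minimal sketch of the
# user-facing path, assuming a CUDA machine with `bitsandbytes` installed; the
# checkpoint name is illustrative:
def _example_load_in_8bit():
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    quantization_config = BitsAndBytesConfig(load_in_8bit=True)
    return AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=quantization_config)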
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 222 | 1 |
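# Editorial example (not part of the test file above): the summarization loop the
# integration test exercises, condensed. The checkpoint name comes from the test;
# everything else is illustrative.
def _example_pegasus_summarize(texts):
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(texts, padding=True, truncation=True, return_tensors="tf")
    generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    return tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)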
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
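# Editorial note (not part of the original script): example invocation, with an
# illustrative SavedModel path; drop --strict to warn instead of raising:
#
#   python check_tf_ops.py --saved_model_path ./my_model/saved_model.pb --opset 12 --strict
#
# The check can also be called programmatically:
def _example_check_saved_model():
    onnx_compliancy("./my_model/saved_model.pb", strict=False, opset=12)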
| 9 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    # Placeholder object surfaced when the `keras_nlp` backend is not installed.
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
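# Editorial note (not part of the original module): the `_LazyModule` swap above
# is what keeps importing this package cheap -- the heavy framework branches are
# only imported when an attribute is first touched. Illustrative usage:
#
#   from transformers import CLIPTokenizer   # no torch/tf/flax import triggered
#   from transformers import CLIPModel       # first touch imports the torch branch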
| 236 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
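# Editorial example (not part of the test file above): the user-facing flow the
# tests cover -- convert, run, convert back before saving. Requires `optimum`;
# the checkpoint is the tiny test model used above, the output path is
# illustrative.
def _example_bettertransformer_roundtrip():
    from transformers import AutoModelForSeq2SeqLM
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()       # swap in fused-attention modules
    model = model.reverse_bettertransformer()  # restore canonical modules before saving
    model.save_pretrained("./tiny-t5-roundtrip")
    return model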
| 236 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """Configuration for MaskFormer: a backbone config plus a DETR-style transformer decoder config."""

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"])
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone and decoder configuration."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
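

# Example usage (a minimal sketch, using the class name reconstructed above; with no
# arguments the config falls back to a Swin backbone and a DETR decoder):
#
#     config = MaskFormerConfig()
#     assert config.backbone_config.model_type == "swin"
#     assert config.decoder_config.model_type == "detr"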
| 35 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text model output that also carries a projection of the hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)

        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 246 | 0 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # find the smallest factor of the remaining n, then divide it out fully
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
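# Worked example (added for illustration; the values below are not from the original
# file): 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
if __name__ == "__main__":
    assert solution(13195) == 29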
| 30 | """simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
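# Worked example (added for illustration): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26,
# so solution(15) should return 26.
if __name__ == "__main__":
    assert solution(15) == 26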
| 30 | 1 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs.

    Uses the XOR sign trick: num1 ^ num2 is negative exactly when the sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 254 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor (resize to a multiple, rescale, normalize)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that height and width are multiples of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255 to map to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        """Preprocess an image or batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
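

# Example usage (a minimal sketch; the file name below is hypothetical and Pillow must
# be installed):
#
#     from PIL import Image
#     processor = DPTImageProcessor()
#     inputs = processor(Image.open("example.jpg"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)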
| 257 | 0 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original latent-diffusion checkpoint into a diffusers LDMPipeline."""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="scaled_linear", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False)

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 353 | """simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 30 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers below limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
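# Worked example (added for illustration): 220 and 284 form the smallest amicable pair,
# so they are the only contributions below 300.
if __name__ == "__main__":
    assert solution(300) == 220 + 284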
| 222 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!')

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
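

# Example (a minimal sketch, illustrative only): a valid `rope_scaling` dictionary has
# exactly the two fields checked by `_rope_scaling_validation`:
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})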
| 222 | 1 |
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """Split arr into two subsets whose sums are as close as possible and return the minimal difference."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = 0
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
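

# Worked example (added for illustration): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
# giving subset sums 12 and 11, so the minimum difference is 1.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1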
| 11 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 11 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236 |
def bfs(graph, source, sink, parent):
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink (Edmonds-Karp variant of Ford-Fulkerson)."""
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
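# For the capacity matrix above (the classic CLRS flow network), the printed maximum
# flow should be 23.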
| 236 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFLayoutLMModel,
            'fill-mask': TFLayoutLMForMaskedLM,
            'text-classification': TFLayoutLMForSequenceClassification,
            'token-classification': TFLayoutLMForTokenClassification,
            'zero-shot': TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''Onnx compliancy broke with TF 2.10''')
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]))
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 287 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
| 287 | 1 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'


# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''"'''):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 12 + '''"'''):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves of any init differ."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors))
    if len(failures) > 0:
        raise ValueError('''\n\n'''.join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_'''):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('''*.py'''))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '''.''')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('''.py''', '''''').replace(os.path.sep, '''.''')
            if len(submodule.split('''.''')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
    'models.esm.openfold_utils',
]
def check_submodules():
    """Check every submodule of transformers is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), '''r''') as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''', init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '''\n'''.join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''')
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 30 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares
    of the first n natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 30 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 353 |
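For reference, the past-key-values dummies built above have shape (batch, n_head, seqlen + 2, n_embd // n_head) per layer. A standalone sketch with the GPT-J defaults from this config (28 layers, 16 heads, hidden size 4096):
import torch
batch, seqlen = 2, 8
n_layer, n_head, n_embd = 28, 16, 4096          # defaults from the config above
shape = (batch, n_head, seqlen + 2, n_embd // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
assert past_key_values[0][0].shape == (2, 16, 10, 256)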
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase : Union[str, Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase : List[str] = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase : Dict = '▁'
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , )-> None:
'''simple docstring'''
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = ''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self )-> int:
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 251 | 0 |
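The input-building methods above follow the RoBERTa-style layout, which can be sketched with the hard-coded fairseq ids from the constructor (<s> = 0, </s> = 2); the content ids here are made up:
cls, sep = [0], [2]
ids_a, ids_b = [10, 11], [20]   # hypothetical sentencepiece ids
assert cls + ids_a + sep == [0, 10, 11, 2]                                 # <s> A </s>
assert cls + ids_a + sep + sep + ids_b + sep == [0, 10, 11, 2, 2, 20, 2]   # <s> A </s></s> B </s>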
def A_ ( _lowerCAmelCase ) -> Any:
UpperCamelCase : Optional[int] = [0] * len(snake_case__ )
UpperCamelCase : Any = []
UpperCamelCase : Optional[Any] = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
UpperCamelCase : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCamelCase : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
__lowerCamelCase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 52 |
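A self-contained brute-force cross-check of the demo above (safe only for small DAGs): the longest vertex chain in the adjacency list is 0 -> 2 -> 5 -> 6 -> 7, so the script should print 5 once its placeholder-renamed locals are restored.
def brute_force_longest(graph):
    # Plain recursive DFS counting vertices, matching the long_dist convention above.
    def dfs(v):
        return 1 + max((dfs(w) for w in graph[v]), default=0)
    return max(dfs(v) for v in graph)
demo = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
assert brute_force_longest(demo) == 5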
from __future__ import annotations
from collections.abc import MutableSequence
class lowercase__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : MutableSequence[float] ) -> None:
if len(SCREAMING_SNAKE_CASE_ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = degree
def __add__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
if self.degree > polynomial_a.degree:
lowercase_ = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , SCREAMING_SNAKE_CASE_ )
def __sub__( self : str , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
lowercase_ = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int | float ) -> int | float:
lowercase_ = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Tuple ) -> str:
lowercase_ = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ )
return polynomial
def __repr__( self : Optional[Any] ) -> str:
return self.__str__()
def _lowercase ( self : int ) -> Polynomial:
lowercase_ = [0] * self.degree
for i in range(self.degree ):
lowercase_ = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float = 0 ) -> Polynomial:
lowercase_ = [0] * (self.degree + 2)
lowercase_ = constant
for i in range(self.degree + 1 ):
lowercase_ = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , SCREAMING_SNAKE_CASE_ )
def __eq__( self : str , SCREAMING_SNAKE_CASE_ : object ) -> bool:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] , SCREAMING_SNAKE_CASE_ : object ) -> bool:
return not self.__eq__(SCREAMING_SNAKE_CASE_ )
| 30 | 0 |
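The heart of __mul__ above is a coefficient convolution, which a few self-contained lines reproduce (coefficients run from the constant term upward, as the constructor suggests):
p, q = [1, 0, 3], [0, 2]            # 3x^2 + 1 and 2x
out = [0] * (len(p) + len(q) - 1)
for i, a in enumerate(p):
    for j, b in enumerate(q):
        out[i + j] += a * b
assert out == [0, 2, 0, 6]          # i.e. 6x^3 + 2x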
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = os.path.abspath(lowerCamelCase_ )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_lowercase : List[Any] = tf.train.list_variables(lowerCamelCase_ )
_lowercase : List[Any] = []
_lowercase : List[Any] = []
_lowercase : Dict = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_lowercase : Dict = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_lowercase : str = name[1:]
# figure out how many levels deep the name is
_lowercase : List[Any] = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(lowerCamelCase_ )
# read data
_lowercase : List[Any] = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ )
names.append('/'.join(lowerCamelCase_ ) )
arrays.append(lowerCamelCase_ )
logger.info(F'''Read a total of {len(lowerCamelCase_ ):,} layers''' )
# Sanity check
if len(set(lowerCamelCase_ ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(lowerCamelCase_ ) )})''' )
_lowercase : List[Any] = list(set(lowerCamelCase_ ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : int = full_name.split('/' )
_lowercase : Tuple = model
_lowercase : str = []
for i, m_name in enumerate(lowerCamelCase_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
_lowercase : Union[str, Any] = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
_lowercase : str = getattr(lowerCamelCase_ , 'embeddings' )
_lowercase : Optional[Any] = getattr(lowerCamelCase_ , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
_lowercase : Tuple = getattr(lowerCamelCase_ , 'encoder' )
_lowercase : int = getattr(lowerCamelCase_ , 'layer' )
_lowercase : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
_lowercase : Tuple = getattr(lowerCamelCase_ , 'pooler' )
_lowercase : Tuple = getattr(lowerCamelCase_ , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
_lowercase : Any = getattr(lowerCamelCase_ , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
_lowercase : int = getattr(lowerCamelCase_ , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
_lowercase : int = getattr(lowerCamelCase_ , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
_lowercase : List[str] = getattr(lowerCamelCase_ , 'token_type_embeddings' )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append('weight' )
_lowercase : Optional[Any] = getattr(lowerCamelCase_ , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
_lowercase : Optional[int] = getattr(lowerCamelCase_ , 'attention' )
_lowercase : Optional[Any] = getattr(lowerCamelCase_ , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
_lowercase : List[Any] = getattr(lowerCamelCase_ , 'attention' )
_lowercase : Optional[int] = getattr(lowerCamelCase_ , 'output' )
_lowercase : Union[str, Any] = getattr(lowerCamelCase_ , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
_lowercase : str = getattr(lowerCamelCase_ , 'attention' )
_lowercase : Dict = getattr(lowerCamelCase_ , 'output' )
_lowercase : int = getattr(lowerCamelCase_ , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
_lowercase : Union[str, Any] = getattr(lowerCamelCase_ , 'output' )
_lowercase : Union[str, Any] = getattr(lowerCamelCase_ , 'dense' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['output', 'LayerNorm'] )
_lowercase : Dict = getattr(lowerCamelCase_ , 'output' )
_lowercase : Optional[int] = getattr(lowerCamelCase_ , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
_lowercase : Tuple = getattr(lowerCamelCase_ , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
_lowercase : int = getattr(lowerCamelCase_ , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
_lowercase : Optional[Any] = getattr(lowerCamelCase_ , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
_lowercase : int = getattr(lowerCamelCase_ , 'intermediate' )
_lowercase : int = getattr(lowerCamelCase_ , 'dense' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
_lowercase : List[str] = getattr(lowerCamelCase_ , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
_lowercase : Optional[Any] = getattr(lowerCamelCase_ , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
_lowercase : Optional[int] = getattr(lowerCamelCase_ , 'weight' )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_lowercase : Optional[Any] = '.'.join(lowerCamelCase_ )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , lowerCamelCase_ ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , lowerCamelCase_ ):
_lowercase : Tuple = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_lowercase : List[str] = array.transpose()
if pointer.shape == array.shape:
_lowercase : List[str] = torch.from_numpy(lowerCamelCase_ )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
_lowercase : Any = BertConfig.from_json_file(lowerCamelCase_ )
_lowercase : Any = BertModel(lowerCamelCase_ )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 84 |
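A hypothetical programmatic invocation of the converter (all paths are placeholders; convert_tfa_checkpoint_to_pytorch is the name the __main__ block above wires up):
convert_tfa_checkpoint_to_pytorch(
    "./bert_tf2/bert_model.ckpt",        # hypothetical TF2 checkpoint
    "./bert_tf2/bert_config.json",       # hypothetical BERT config
    "./bert_pytorch/pytorch_model.bin",  # hypothetical output file
)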
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = ["DeiTFeatureExtractor"]
SCREAMING_SNAKE_CASE : Dict = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 1 |
def _UpperCAmelCase (UpperCamelCase__ : int ):
_A : Tuple = len(UpperCamelCase__ )
_A : List[str] = sum(UpperCamelCase__ )
_A : Tuple = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_A : int = True
for i in range(1 , s + 1 ):
_A : Dict = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_A : Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
_A : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_A : Optional[int] = s - 2 * j
break
return diff
| 11 |
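A hand check of the partition logic above, kept self-contained because the function's locals are placeholder-renamed: [1, 6, 11, 5] sums to 23, the best split is {1, 5, 6} against {11}, so the minimum difference is |12 - 11| = 1.
arr = [1, 6, 11, 5]
total = sum(arr)
achievable = {0}                     # all reachable subset sums
for value in arr:
    achievable |= {value + s for s in achievable}
best_half = max(s for s in achievable if s <= total // 2)
assert total - 2 * best_half == 1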
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| 11 | 1 |
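The BPE merge loop above ranks adjacent symbol pairs; the bigram extraction it relies on is just a zip of the word with itself shifted by one (self-contained re-derivation):
word = ("h", "e", "l", "l", "o</w>")
pairs = set(zip(word, word[1:]))
print(pairs)  # e.g. {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')} (set order varies)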
from math import factorial, pi
def _A ( lowercase , lowercase = 30 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
a =float(lowerCAmelCase__ )
a =theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowerCAmelCase__ ) )
def _A ( lowercase , lowercase = 30 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
a =float(lowerCAmelCase__ )
a =theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5)) | 360 |
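With 30 terms the truncated series is exact to double precision for small angles; a self-contained cross-check against the standard library, using the same summation as above:
from math import factorial, sin
theta, terms = 1.0, 30
series_sin = sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(terms))
assert abs(series_sin - sin(theta)) < 1e-12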
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ : int = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Optional[Any] = {
"""google/rembert""": 2_5_6,
}
lowerCamelCase_ : Optional[Any] = """▁"""
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
a =do_lower_case
a =remove_space
a =keep_accents
a =vocab_file
a =False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__A ) )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,) | 215 | 0 |
_lowerCamelCase =[
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 287 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _a ( lowerCamelCase ):
return "".join(sorted(lowerCamelCase ) )
def _a ( lowerCamelCase ):
return word_by_signature[signature(lowerCamelCase )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287 | 1 |
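The script keys every word by its sorted letters, so two words are anagrams exactly when those signatures match (self-contained check):
assert "".join(sorted("stop")) == "".join(sorted("pots")) == "opst"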
from collections import defaultdict
from math import gcd
def lowerCamelCase__ (__lowerCamelCase = 1500000 ):
_SCREAMING_SNAKE_CASE : defaultdict = defaultdict(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1, __lowerCamelCase, 2 ):
if gcd(__lowerCamelCase, __lowerCamelCase ) > 1:
continue
_SCREAMING_SNAKE_CASE : List[str] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__lowerCamelCase, limit + 1, __lowerCamelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f"{solution() = }") | 355 |
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 325 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__A =False
try:
__A =_is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase = None , lowercase = [] ) -> Optional[int]:
lowerCamelCase_ = 0
lowerCamelCase_ = choices
lowerCamelCase_ = prompt
if sys.platform == "win32":
lowerCamelCase_ = "*"
else:
lowerCamelCase_ = "➔ "
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "" ) -> int:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , lowercase )
else:
forceWrite(self.choices[index] , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
if index == self.position:
forceWrite(f' {self.arrow_char} ' )
self.write_choice(lowercase )
else:
forceWrite(f' {self.choices[index]}' )
reset_cursor()
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = 1 ) -> List[Any]:
lowerCamelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(lowercase )
move_cursor(lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def SCREAMING_SNAKE_CASE_( self ) -> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = int(chr(self.current_selection ) )
lowerCamelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , lowercase )
else:
return
else:
return
def SCREAMING_SNAKE_CASE_( self , lowercase = 0 ) -> List[str]:
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
lowerCamelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(lowercase )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
lowerCamelCase_ = int(builtins.input() )
except ValueError:
lowerCamelCase_ = default_choice
else:
lowerCamelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(lowercase , "\n" )
return choice
| 19 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
UpperCamelCase_ = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 251 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : List[str] ) -> Tuple:
assert isinstance(_UpperCamelCase, _UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : List[Any], _UpperCamelCase : List[Any] ) -> Dict:
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = TextDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read()
_check_text_dataset(_UpperCamelCase, _UpperCamelCase )
@pytest.mark.parametrize(
'''features''', [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
], )
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Optional[Any], _UpperCamelCase : int ) -> str:
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = TextDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()
_check_text_dataset(_UpperCamelCase, _UpperCamelCase )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : Dict, _UpperCamelCase : List[str] ) -> List[str]:
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
A_ = TextDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, split=_UpperCamelCase ).read()
_check_text_dataset(_UpperCamelCase, _UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def _UpperCAmelCase ( _UpperCamelCase : Dict, _UpperCamelCase : Any, _UpperCamelCase : List[str] ) -> List[str]:
if issubclass(_UpperCamelCase, _UpperCamelCase ):
A_ = text_path
elif issubclass(_UpperCamelCase, _UpperCamelCase ):
A_ = [text_path]
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
A_ = TextDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read()
_check_text_dataset(_UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : Dict, _UpperCamelCase : List[str]=("train",) ) -> List[Any]:
assert isinstance(_UpperCamelCase, _UpperCamelCase )
for split in splits:
A_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : List[str], _UpperCamelCase : Tuple ) -> Any:
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ = TextDatasetReader({'''train''': text_path}, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read()
_check_text_datasetdict(_UpperCamelCase, _UpperCamelCase )
@pytest.mark.parametrize(
'''features''', [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
], )
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : List[str], _UpperCamelCase : Union[str, Any] ) -> Tuple:
A_ = tmp_path / '''cache'''
    # the "text" column is read as string by default; `features` lets the test request other dtypes
A_ = {'''text''': '''string'''}
A_ = features.copy() if features else default_expected_features
A_ = (
Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ = TextDatasetReader({'''train''': text_path}, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()
_check_text_datasetdict(_UpperCamelCase, _UpperCamelCase )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Optional[int], _UpperCamelCase : Optional[int] ) -> Any:
if split:
A_ = {split: text_path}
else:
A_ = '''train'''
A_ = {'''train''': text_path, '''test''': text_path}
A_ = tmp_path / '''cache'''
A_ = {'''text''': '''string'''}
A_ = TextDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read()
_check_text_datasetdict(_UpperCamelCase, _UpperCamelCase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 351 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
A_ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal( modela , modelb ) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
__UpperCAmelCase = False
__UpperCAmelCase = False
def train_command_factory(args: Namespace):
    '''simple docstring'''
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
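# A minimal, self-contained sketch of the subcommand pattern used above: each
# command registers an argparse sub-parser and binds its factory through
# set_defaults(func=...). All names below are illustrative, not transformers API.
import argparse

def demo_factory(args: argparse.Namespace) -> str:
    return f"training from {args.train_data}"

cli = argparse.ArgumentParser(prog="cli")
subparsers = cli.add_subparsers()
demo_parser = subparsers.add_parser("train", help="Train a model.")
demo_parser.add_argument("--train_data", type=str, required=True)
demo_parser.set_defaults(func=demo_factory)

parsed = cli.parse_args(["train", "--train_data", "data.csv"])
print(parsed.func(parsed))  # dispatches to the factory bound to the subcommand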
| 84 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True, ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None, ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
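# A minimal sketch of the residual-merging rule in MultiControlNetModel.forward
# above: the first net initializes the accumulators, every later net is summed
# in. Plain tensors stand in for real ControlNet outputs; shapes are illustrative.
import torch

def merge_controlnet_residuals(per_net_outputs):
    down_acc, mid_acc = None, None
    for down_samples, mid_sample in per_net_outputs:
        if down_acc is None:
            down_acc, mid_acc = list(down_samples), mid_sample
        else:
            down_acc = [a + b for a, b in zip(down_acc, down_samples)]
            mid_acc = mid_acc + mid_sample
    return down_acc, mid_acc

outs = [([torch.ones(2, 4)], torch.ones(2, 4)) for _ in range(3)]
down, mid = merge_controlnet_residuals(outs)
print(mid[0, 0].item())  # 3.0: residuals from all three nets were summed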
| 84 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
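# A minimal sketch of the greedy longest-match-first WordPiece algorithm that
# the tests above exercise; the tiny vocabulary mirrors the test fixture.
def wordpiece(word, vocab, unk='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix matched: the whole word becomes [UNK]
        tokens.append(cur)
        start = end
    return tokens

demo_vocab = {'un', '##want', '##ed', 'runn', '##ing'}
print(wordpiece('unwanted', demo_vocab))   # ['un', '##want', '##ed']
print(wordpiece('unwantedX', demo_vocab))  # ['[UNK]']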
| 295 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims=128, targets_length=256, max_decoder_noise_time=2000.0, d_model=768, num_layers=12, num_heads=12, d_kv=64, d_ff=2048, dropout_rate=0.1, ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)


class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states


class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # the gated activation needs two distinct projections: wi_0 feeds the
        # GELU gate, wi_1 provides the linear branch
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
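# A minimal numeric sketch of two building blocks above: the scale-only RMS
# normalization of TaLayerNorm (variance over the last dim, no mean, no bias)
# and the FiLM rule x * (1 + scale) + shift used for conditioning.
import torch

def rms_norm(x, weight, eps=1e-6):
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(weight.dtype)

def film(x, conditioning):  # conditioning carries 2x the feature width
    scale, shift = torch.chunk(conditioning, 2, dim=-1)
    return x * (1 + scale) + shift

demo = torch.randn(2, 8)
print(rms_norm(demo, torch.ones(8)).shape)             # torch.Size([2, 8])
print(film(demo, torch.zeros(2, 16)).allclose(demo))   # True: zero conditioning is the identity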
| 295 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
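# A minimal sketch of the character-level scheme the MGP-STR tokenizer uses:
# tokenization is a plain character split, ids come from a dict, and unknown
# characters fall back to the [GO] token's id. The vocabulary is illustrative.
class CharTokenizer:
    def __init__(self, vocab, unk_token='[GO]'):
        self.vocab = vocab
        self.decoder = {v: k for k, v in vocab.items()}
        self.unk_token = unk_token

    def tokenize(self, text):
        return list(text)

    def token_to_id(self, token):
        return self.vocab.get(token, self.vocab[self.unk_token])

tok = CharTokenizer({'[GO]': 0, 'a': 1, 'b': 2})
print([tok.token_to_id(t) for t in tok.tokenize('ab?')])  # [1, 2, 0]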
| 27 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
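# Hypothetical invocation of the conversion script above (all paths are
# placeholders, not real checkpoints):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin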
| 215 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.', FutureWarning, )
        super().__init__(args=args, **kwargs)
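# A generic sketch of the deprecation-shim pattern above: a subclass that only
# warns, then defers entirely to its replacement. Class names are illustrative.
import warnings

class NewAPI:
    def __init__(self, value=0):
        self.value = value

class OldAPI(NewAPI):
    def __init__(self, *args, **kwargs):
        warnings.warn('`OldAPI` is deprecated, use `NewAPI` instead.', FutureWarning)
        super().__init__(*args, **kwargs)

obj = OldAPI(value=3)  # emits a FutureWarning, otherwise behaves like NewAPI
print(obj.value)  # 3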
| 145 |
def combination_util(arr, n, r, index, data, i):
    '''simple docstring'''
    if index == r:
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    '''simple docstring'''
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
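# For reference, the include/exclude recursion above enumerates exactly the
# r-sized combinations that itertools yields, in the same lexicographic order:
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)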
| 145 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 158 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
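# Worked example for min_path_sum above: after the in-place DP every cell holds
# the cheapest cost of reaching it while moving only right or down.
#   row 0 prefix sums    -> [1, 4, 5]
#   column 0 prefix sums -> 1, 2, 6
#   interior: cell += min(top, left), giving [[1, 4, 5], [2, 7, 6], [6, 8, 7]]
print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7 (path 1->3->1->1->1)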
| 325 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """simple docstring"""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """simple docstring"""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None, ) -> List[Tuple[slice, ...]]:
    """simple docstring"""
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ))

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ))

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """simple docstring"""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ) -> Any:
    """simple docstring"""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size, ) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
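# A toy version of the chunking idea implemented by chunk_layer above: flatten
# the batch dims, run the layer on fixed-size slices, and stitch the outputs
# back together. Unlike the real utility, this sketch handles a single tensor.
import torch

def chunked_apply(layer, x, chunk_size, no_batch_dims=1):
    batch_shape = x.shape[:no_batch_dims]
    flat = x.reshape(-1, *x.shape[no_batch_dims:])
    pieces = [layer(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    out = torch.cat(pieces, dim=0)
    return out.view(*batch_shape, *out.shape[1:])

lin = torch.nn.Linear(8, 4)
x = torch.randn(2, 6, 8)
with torch.no_grad():
    full = lin(x.reshape(-1, 8)).view(2, 6, 4)
    piecewise = chunked_apply(lin, x, chunk_size=5, no_batch_dims=2)
print(torch.allclose(full, piecewise))  # True: chunking does not change the result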
| 49 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
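# Worked trace of decimal_to_any(255, 16) with the ALPHABET_VALUES map above:
#   divmod(255, 16) -> (15, 15): 15 > 9, so the digit is ALPHABET_VALUES['15'] == 'F'
#   divmod(15, 16)  -> (0, 15): emit 'F' again; num // base == 0 ends the loop
# digits are collected least-significant first, so the final reversal yields 'FF'.
print(decimal_to_any(255, 16))  # FF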
| 49 | 1 |
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    '''simple docstring'''

    def __init__(self, k, window_size):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self):
        return str(self.k)

    def detect(self, img_path):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
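# A tiny numeric check of the corner response used above,
# R = det(M) - k * trace(M)**2 over the windowed structure tensor. OpenCV's
# built-in cv2.cornerHarris computes the same score per pixel, so the two
# corner maps should broadly agree up to thresholds and window handling.
import numpy as np

def harris_response(wxx, wyy, wxy, k=0.04):
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2

print(harris_response(np.float64(4.0), np.float64(4.0), np.float64(0.0)))  # 13.44 -> corner-like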
| 198 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
    _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_chinese_clip'] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
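# A generic sketch of the lazy-import pattern above: keep a name -> submodule
# map and resolve attributes on first access, so importing the package stays
# cheap. This toy uses importlib directly rather than transformers' _LazyModule.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = TinyLazyModule('demo', {'json': ['loads', 'dumps']})
print(lazy.loads('[1, 2]'))  # [1, 2]; the json module is imported on first use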
| 18 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'), )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
    md_list , localized_md_list , localized_readme["""format_model_list"""] )
self.assertFalse(num_models_equal )
self.assertEqual(converted_md_list , converted_localized_md_list )
num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
    md_list , converted_localized_md_list , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(num_models_equal )
link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
    link_changed_md_list , link_unchanged_md_list , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(converted_md_list , converted_md_list_sample )
| 365 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 1_0.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
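# Side note (added illustration, not part of the test): `bird_canny.png` is the kind of control
# image one would derive from the source photo with an edge detector, e.g. with OpenCV:
#   import cv2, numpy as np
#   edges = cv2.Canny(np.array(source_image), 100, 200)            # threshold values are arbitrary
#   canny_image = Image.fromarray(np.stack([edges] * 3, axis=-1))  # replicate edges to 3 channels
# `source_image` here is a hypothetical PIL image of the original bird photo.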
| 1 | 0 |
def sum_digits( num ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 1_0
        num //= 1_0
    return digit_sum
def solution( max_n = 1_0_0 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F'{solution() = }')
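# Sanity check (added illustration, not in the original snippet): the recurrence above generates
# the numerators of the convergents of e = [2; 1, 2, 1, 1, 4, ...], i.e. 2, 3, 8, 11, 19, 87, ...
# Per Project Euler 65, the 10th convergent is 1457/536, so the digit sum is 1 + 4 + 5 + 7 = 17.
assert solution(10 ) == 17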
| 295 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # replace every use of the removed initializer with its kept duplicate
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
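# Usage sketch (added illustration; the file name is hypothetical):
#   optimized_path = remove_dup_initializers("exported/model.onnx")
# This writes `optimized_model.onnx` next to the input and returns its path, which can then be
# loaded directly, e.g. with onnxruntime.InferenceSession(optimized_path).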
| 295 | 1 |
"""simple docstring"""
import itertools
import math
def is_prime( number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth: int = 10001 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , None ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 363 | """simple docstring"""
def solution( n: int = 100 ) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1 # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b # calculates the current power
            collect_powers.add(current_pow ) # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 64 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path, map_location="cpu" )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu" )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV projection into separate Q, K, V weights
            q_name = key.replace(".qkv_proj.", ".q_proj." )
            k_name = key.replace(".qkv_proj.", ".k_proj." )
            v_name = key.replace(".qkv_proj.", ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
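# Illustration (added, not part of the conversion script): the split above on a toy fused
# projection. With a hidden size of 4, the fused QKV weight has 3 * 4 rows, and torch.split
# recovers three (4, 4) blocks in metaseq's K, V, Q storage order.
def _demo_qkv_split():
    fused = torch.arange(12 * 4, dtype=torch.float32 ).reshape(12, 4 )
    depth = fused.shape[0]
    assert depth % 3 == 0
    k, v, q = torch.split(fused, depth // 3, dim=0 )
    assert k.shape == v.shape == q.shape == (4, 4)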
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path, pytorch_dump_folder_path, config=None ):
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__a = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 145 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
__a = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    """simple docstring"""

    def __init__( self , args=None , **kwargs ) -> Union[str, Any]:
        """simple docstring"""
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 145 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCamelCase = get_logger()
UpperCamelCase = None
class _lowerCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
super().__init__(features=_SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(_SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '''
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
A_ : Union[str, Any] = device if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ : List[str] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
A_ : Optional[Any] = str(jax.devices()[0] )
A_ : Optional[Any] = jnp_array_kwargs
@staticmethod
def _snake_case ( )->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(_SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_SCREAMING_SNAKE_CASE , axis=0 )
return column
    def _snake_case ( self , value )->Optional[Any]:
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_SCREAMING_SNAKE_CASE , '''__array__''' ) and not isinstance(_SCREAMING_SNAKE_CASE , jax.Array ):
A_ : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Mapping:
'''simple docstring'''
A_ : Optional[int] = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
A_ : List[str] = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->"jax.Array":
'''simple docstring'''
A_ : Tuple = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
A_ : Dict = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
A_ : List[Any] = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
A_ : Any = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Mapping:
'''simple docstring'''
A_ : List[str] = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
A_ : Dict = self._consolidate(batch[column_name] )
return batch
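# Usage sketch (added illustration): this formatter is what backs `with_format("jax")`, e.g.
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # a jax.Array; float32 (or int32) unless jax_enable_x64 is set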
| 65 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )->Any:
'''simple docstring'''
A_ : List[Any] = parent
A_ : int = batch_size
A_ : str = seq_length
A_ : int = is_training
A_ : Any = use_token_type_ids
A_ : Union[str, Any] = use_labels
A_ : Any = vocab_size
A_ : Dict = hidden_size
A_ : Dict = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Dict = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : str = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : Union[str, Any] = num_labels
A_ : List[str] = num_choices
A_ : Union[str, Any] = scope
A_ : Any = self.vocab_size - 1
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Any = None
if self.use_token_type_ids:
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : str = None
A_ : Union[str, Any] = None
A_ : Optional[int] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
A_ : Optional[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
A_ : int = OpenAIGPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : int = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
A_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : int = OpenAIGPTLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : List[Any] = OpenAIGPTDoubleHeadsModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : str = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ : Any = self.num_labels
A_ : List[Any] = OpenAIGPTForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self )->int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->Optional[int]:
'''simple docstring'''
A_ : Optional[Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
A_ : List[Any] = inputs_dict['''labels''']
A_ : Any = inputs_dict['''labels''']
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
A_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Any = OpenAIGPTModelTester(self )
A_ : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )->List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = OpenAIGPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Optional[int] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president is
A_ : Union[str, Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : Dict = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
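# Side note (added illustration, not part of the test): the expected ids can be decoded for
# inspection with the matching tokenizer, e.g.
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   tokenizer.decode(expected_output_ids)
# which should reproduce the sentence quoted in the trailing comment above.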
| 65 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int]):
'''simple docstring'''
self.test()
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = 0
__a = False
while not completed:
if counter == 1:
self.reset()
__a = self.advance()
if not self.does_advance(__SCREAMING_SNAKE_CASE):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''')
__a , __a , __a = self.update(__SCREAMING_SNAKE_CASE)
counter += 1
if counter > 10_000:
raise Exception('''update() does not fulfill the constraint.''')
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''')
@abstractmethod
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[int]):
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE , self).__init__()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or len(__SCREAMING_SNAKE_CASE) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.')
if any((not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or token_id < 0) for token_id in token_ids):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')
__a = token_ids
__a = len(self.token_ids)
__a = -1 # the index of the currently fulfilled step
__a = False
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE)}')
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE)}')
__a = False
__a = False
__a = False
if self.does_advance(__SCREAMING_SNAKE_CASE):
self.fulfilled_idx += 1
__a = True
if self.fulfilled_idx == (self.seqlen - 1):
__a = True
__a = completed
else:
# failed to make progress.
__a = True
self.reset()
return stepped, completed, reset
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = False
__a = 0
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Any=False):
'''simple docstring'''
__a = PhrasalConstraint(self.token_ids)
if stateful:
__a = self.seqlen
__a = self.fulfilled_idx
__a = self.completed
return new_constraint
class _A :
def __init__( self : int , __SCREAMING_SNAKE_CASE : List[List[int]] , __SCREAMING_SNAKE_CASE : Dict=True):
'''simple docstring'''
__a = max([len(__SCREAMING_SNAKE_CASE) for one in nested_token_ids])
__a = {}
for token_ids in nested_token_ids:
__a = root
for tidx, token_id in enumerate(__SCREAMING_SNAKE_CASE):
if token_id not in level:
__a = {}
__a = level[token_id]
if no_subsets and self.has_subsets(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F' {nested_token_ids}.')
__a = root
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = self.trie
for current_token in current_seq:
__a = start[current_token]
__a = list(start.keys())
return next_tokens
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = self.next_tokens(__SCREAMING_SNAKE_CASE)
return len(__SCREAMING_SNAKE_CASE) == 0
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = list(root.values())
if len(__SCREAMING_SNAKE_CASE) == 0:
return 1
else:
return sum([self.count_leaves(__SCREAMING_SNAKE_CASE) for nn in next_nodes])
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = self.count_leaves(__SCREAMING_SNAKE_CASE)
return len(__SCREAMING_SNAKE_CASE) != leaf_count
class _A ( __UpperCAmelCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[List[int]]):
'''simple docstring'''
super(__SCREAMING_SNAKE_CASE , self).__init__()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or len(__SCREAMING_SNAKE_CASE) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.')
if any(not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for token_ids in nested_token_ids):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.')
if any(
any((not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) or token_id < 0) for token_id in token_ids)
for token_ids in nested_token_ids):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.')
__a = DisjunctiveTrie(__SCREAMING_SNAKE_CASE)
__a = nested_token_ids
__a = self.trie.max_height
__a = []
__a = False
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.trie.next_tokens(self.current_seq)
if len(__SCREAMING_SNAKE_CASE) == 0:
return None
else:
return token_list
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE)}')
__a = self.trie.next_tokens(self.current_seq)
return token_id in next_tokens
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE)}')
__a = False
__a = False
__a = False
if self.does_advance(__SCREAMING_SNAKE_CASE):
self.current_seq.append(__SCREAMING_SNAKE_CASE)
__a = True
else:
__a = True
self.reset()
__a = self.trie.reached_leaf(self.current_seq)
__a = completed
return stepped, completed, reset
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = False
__a = []
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str=False):
'''simple docstring'''
__a = DisjunctiveConstraint(self.token_ids)
if stateful:
__a = self.seqlen
__a = self.current_seq
__a = self.completed
return new_constraint
class _A :
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[Constraint]):
'''simple docstring'''
__a = constraints
# max # of steps required to fulfill a given constraint
__a = max([c.seqlen for c in constraints])
__a = len(__SCREAMING_SNAKE_CASE)
__a = False
self.init_state()
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = []
__a = None
__a = [constraint.copy(stateful=__SCREAMING_SNAKE_CASE) for constraint in self.constraints]
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints) * self.max_seqlen) + add
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__a = constraint.advance()
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
token_list.append(__SCREAMING_SNAKE_CASE)
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
token_list.extend(__SCREAMING_SNAKE_CASE)
else:
__a = self.inprogress_constraint.advance()
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
token_list.append(__SCREAMING_SNAKE_CASE)
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
token_list.extend(__SCREAMING_SNAKE_CASE)
if len(__SCREAMING_SNAKE_CASE) == 0:
return None
else:
return token_list
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[List[int]]):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__a , __a = self.add(__SCREAMING_SNAKE_CASE)
# the entire list of constraints are fulfilled
if self.completed:
break
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.')
__a , __a = False, False
if self.completed:
__a = True
__a = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__a , __a , __a = self.inprogress_constraint.update(__SCREAMING_SNAKE_CASE)
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__SCREAMING_SNAKE_CASE))
__a = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint)
__a = None
if len(self.pending_constraints) == 0:
# we're done!
__a = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(__SCREAMING_SNAKE_CASE):
__a , __a , __a = pending_constraint.update(__SCREAMING_SNAKE_CASE)
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''')
if complete:
self.complete_constraints.append(__SCREAMING_SNAKE_CASE)
__a = None
if not complete and stepped:
__a = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__a = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__a = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
    def _lowerCamelCase ( self , stateful : bool=True):
        '''simple docstring'''
        new_state = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
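# Usage sketch (added illustration; the token ids are arbitrary placeholders):
#   state = ConstraintListState([PhrasalConstraint([5, 6, 7]), DisjunctiveConstraint([[8, 9], [10]])])
#   state.add(5); state.add(6); state.add(7)  # completes the phrasal constraint
#   state.add(10)                             # completes one branch of the disjunctive constraint
#   assert state.completed
# In practice these objects are driven by a constrained beam scorer via the `constraints=`
# argument of `generate()` rather than called by hand like this.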
| 49 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
    def __init__( self , path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
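# Usage sketch (added illustration): this reader is what backs the "text" loader, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "train.txt"}, split="train")
# or, calling the class directly (the file name is hypothetical):
#   ds = TextDatasetReader("train.txt").read()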
| 49 | 1 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ) -> None:
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self ) -> float:
        """simple docstring"""
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__(self , other ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self , start , goal ) -> None:
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self ) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self , parent ) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self , node ) -> Path:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
UpperCamelCase__ = (0, 0)
UpperCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
UpperCamelCase__ = GreedyBestFirst(init, goal)
UpperCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCamelCase__ = 2
for elem in grid:
print(elem)
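# A quick self-contained check of the classes above (no extra assumptions):
# searching from a cell to itself returns a single-cell path.
#
#   trivial = GreedyBestFirst((0, 0), (0, 0))
#   assert trivial.search() == [(0, 0)]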
| 360 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subparsers = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subparsers, parents=[parent_parser])
    update_command_parser(subparsers, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
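# Illustrative only: exercising the parser above outside the installed
# `accelerate` entry point. The subcommand and flag are assumptions here.
#
#   parser = get_config_parser()
#   args = parser.parse_args(["default", "--config_file", "my_config.yaml"])
#   args.func(args)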
| 143 | 0 |
"""
The Banker's algorithm is a resource allocation and deadlock avoidance algorithm:
it checks whether granting requests keeps the system in a safe state by simulating
the allocation of each process's predetermined maximum claim.
"""
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector minus what is already allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """For each process, how much it may still claim (max claim - allocated)."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm until all processes run or the state is unsafe."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
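# Illustrative only: driving the class above with the module's test tables.
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)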
| 104 | from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline using any `ModelWithLMHead`."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
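# Illustrative only: the usual way this pipeline is reached from user code.
# The checkpoint name is an assumption for the example.
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   print(unmasker("Paris is the [MASK] of France.", top_k=2))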
| 1 | 0 |
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for the transformer."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
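# Illustrative only: a quick smoke test of the head above; the sizes are
# arbitrary.
#
#   import torch
#
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))  # -> shape (2, 5)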
| 371 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A shortcut from an intermediate BertLayer output to an early classification."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
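# Illustrative only: the early-exit rule in DeeBertEncoder compares the entropy
# of a highway classifier's logits against a per-layer threshold. Lower entropy
# means a more confident (peakier) distribution, so the model can stop early.
#
#   logits = torch.tensor([[2.0, 0.1, 0.1], [0.9, 1.0, 1.1]])
#   print(entropy(logits))  # the first (confident) row yields the lower entropy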
| 278 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=1_0 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.02 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_frames
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = attention_type
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = (num_frames) * self.num_patches_per_frame + 1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__SCREAMING_SNAKE_CASE = self.num_labels
return config
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TimesformerModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TimesformerForVideoClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__)
# verify the logits shape
__SCREAMING_SNAKE_CASE = torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__lowercase : Tuple = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Dict = False
__lowercase : Optional[Any] = False
__lowercase : Union[str, Any] = False
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerModelTester(self)
__SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False):
__SCREAMING_SNAKE_CASE = copy.deepcopy(lowerCAmelCase__)
if return_labels:
if model_class in get_values(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__)
return inputs_dict
def snake_case_ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""")
def snake_case_ ( self):
pass
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__)
@slow
def snake_case_ ( self):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TimesformerModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def snake_case_ ( self):
if not self.has_attentions:
pass
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
__SCREAMING_SNAKE_CASE = self.model_tester.num_frames
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
self.assertEqual(out_len + 1 , len(lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case_ ( self):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__SCREAMING_SNAKE_CASE = np.load(UpperCamelCase_ )
return list(UpperCamelCase_ )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""").to(
lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(video[:8] , return_tensors="""pt""").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__)
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
| 100 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 64 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = '''blip_text_model'''
def __init__( self : Tuple , _UpperCAmelCase : int=30_524 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[Any]=3_072 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Any=8 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Union[str, Any]=1E-1_2 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : str=30_522 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[int]=102 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : str , ):
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , sep_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Optional[int] ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_A , _A = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
_A = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = '''blip_vision_model'''
def __init__( self : Any , _UpperCAmelCase : int=768 , _UpperCAmelCase : int=3_072 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : str=384 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[str]=1E-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Union[str, Any]=1E-1_0 , **_UpperCAmelCase : List[Any] , ):
super().__init__(**_UpperCAmelCase )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_A , _A = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
_A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = '''blip'''
UpperCAmelCase : Tuple = True
def __init__( self : Union[str, Any] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : List[str]=2.6592 , _UpperCAmelCase : Optional[int]=256 , **_UpperCAmelCase : Tuple , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
_A = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
_A = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
_A = BlipTextConfig(**_UpperCAmelCase )
_A = BlipVisionConfig(**_UpperCAmelCase )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.02
_A = image_text_hidden_size
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , _UpperCAmelCase : BlipTextConfig , _UpperCAmelCase : BlipVisionConfig , **_UpperCAmelCase : List[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output
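# Illustrative only: composing a BlipConfig from the two sub-configs defined
# above, using nothing but defaults.
#
#   text_config = BlipTextConfig()
#   vision_config = BlipVisionConfig()
#   config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#   print(config.projection_dim)  # 512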
| 351 |
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
_A = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _snake_case ( _snake_case : str ) -> dict[str, str]:
'''simple docstring'''
_A = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_A = remove_duplicates(key.upper() )
_A = len(_snake_case )
# First fill cipher with key characters
_A = {alphabet[i]: char for i, char in enumerate(_snake_case )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_snake_case ) , 26 ):
_A = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_A = alphabet[i - offset]
_A = char
return cipher_alphabet
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
_A = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( ) -> None:
'''simple docstring'''
_A = input('Enter message to encode or decode: ' ).strip()
_A = input('Enter keyword: ' ).strip()
_A = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_A = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_A = create_cipher_map(_snake_case )
print(func(_snake_case , _snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
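# Illustrative only: a non-interactive round trip with the functions above.
#
#   cipher_map = create_cipher_map("Goodbye!!")
#   encoded = encipher("Hello World!!", cipher_map)
#   assert decipher(encoded, cipher_map) == "HELLO WORLD!!"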
| 271 | 0 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque of values; new data is pushed to the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
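# Illustrative only: exercising the subclass above. The base `HashTable`
# constructor signature and `insert_data` method are assumptions here.
#
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)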
| 65 | import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
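# Illustrative only: `get_pairs` over a symbol tuple, as consumed by `bpe` above.
#
#   get_pairs(("l", "o", "w</w>"))  # -> {("l", "o"), ("o", "w</w>")}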
| 65 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
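# Illustrative only: resolving a formatter through the registry above.
#
#   formatter = get_formatter("np")  # the "np" alias resolves to "numpy"
#   assert isinstance(formatter, NumpyFormatter)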
| 147 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
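# Illustrative only: the three-stage layout of the default (CvT-13) config
# defined above.
#
#   config = CvtConfig()
#   print(config.depth)      # [1, 2, 10]
#   print(config.embed_dim)  # [64, 192, 384]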
| 147 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self ,tokenizer ,dataset ,seq_length=1024 ,num_of_sequences=1024 ,chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer ,truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0 ,len(all_token_ids) ,self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
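# --- Illustrative sketch of the packing step above (toy "tokenizer" = ord() per
# character; all names here are hypothetical, not part of the original script) ---
def pack_into_blocks(texts, seq_length=8, sep_id=0):
    all_ids = []
    for text in texts:
        all_ids.extend([ord(c) for c in text] + [sep_id])
    for i in range(0, len(all_ids), seq_length):
        block = all_ids[i : i + seq_length]
        if len(block) == seq_length:   # the trailing remainder is dropped,
            yield block                # mirroring the length check in __iter__

assert all(len(b) == 8 for b in pack_into_blocks(["hello", "world!"]))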
def create_dataloader( args ):
    ds_kwargs = {'streaming': True}
    train_data = load_dataset(args.dataset_name , split='train' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , train_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('inf' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 73 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __snake_case ( unittest.TestCase ):
    @require_torch
    def test_small_model_pt( self ) -> Optional[Any]:
        '''simple docstring'''
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.5_0_1, 'label': 'Sound of a dog'}, {'score': 0.4_9_9, 'label': 'Sound of vacuum cleaner'}] , )
    @unittest.skip('No models are available in TF' )
    def test_small_model_tf( self ) -> List[str]:
        '''simple docstring'''
        pass
    @slow
    @require_torch
    def test_large_model_pt( self ) -> List[str]:
        '''simple docstring'''
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.9_9_9, 'label': 'Sound of a dog'},
                {'score': 0.0_0_1, 'label': 'Sound of vacuum cleaner'},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.9_9_9, 'label': 'Sound of a dog'},
                    {'score': 0.0_0_1, 'label': 'Sound of vacuum cleaner'},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vacuum cleaner'] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.9_9_9, 'label': 'Sound of a dog'},
                    {'score': 0.0_0_1, 'label': 'Sound of vacuum cleaner'},
                ],
            ]
            * 5 , )
    @unittest.skip('No models are available in TF' )
    def test_large_model_tf( self ) -> Any:
        '''simple docstring'''
        pass
| 143 | 0 |
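# --- Illustrative sketch --- The pipeline tested above (CLAP-style zero-shot audio
# classification) scores an audio embedding against text embeddings of the candidate
# labels; a minimal cosine-similarity + softmax version (assumed, not the pipeline code):
import numpy as np

def zero_shot_scores(audio_emb, label_embs):
    sims = np.array([
        np.dot(audio_emb, l) / (np.linalg.norm(audio_emb) * np.linalg.norm(l))
        for l in label_embs
    ])
    e = np.exp(sims - sims.max())      # numerically stable softmax
    return e / e.sum()

probs = zero_shot_scores(np.array([1.0, 0.0]), [np.array([1.0, 0.0]), np.array([0.0, 1.0])])
assert probs[0] > probs[1] and abs(probs.sum() - 1.0) < 1e-9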
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _A ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int = 128 , __SCREAMING_SNAKE_CASE : int = 256 , __SCREAMING_SNAKE_CASE : float = 20_00.0 , __SCREAMING_SNAKE_CASE : int = 768 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : int = 64 , __SCREAMING_SNAKE_CASE : int = 2_048 , __SCREAMING_SNAKE_CASE : float = 0.1 , ):
'''simple docstring'''
super().__init__()
__a = nn.Sequential(
nn.Linear(__SCREAMING_SNAKE_CASE , d_model * 4 , bias=__SCREAMING_SNAKE_CASE) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__SCREAMING_SNAKE_CASE) , nn.SiLU() , )
__a = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = False
__a = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(p=__SCREAMING_SNAKE_CASE)
__a = nn.ModuleList()
for lyr_num in range(__SCREAMING_SNAKE_CASE):
# FiLM conditional T5 decoder
__a = DecoderLayer(d_model=__SCREAMING_SNAKE_CASE , d_kv=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , d_ff=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE)
self.decoders.append(__SCREAMING_SNAKE_CASE)
__a = TaLayerNorm(__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(p=__SCREAMING_SNAKE_CASE)
__a = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a , __a , __a = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__a = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype)
__a = self.conditioning_emb(__SCREAMING_SNAKE_CASE).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__a = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__a = torch.broadcast_to(
torch.arange(__SCREAMING_SNAKE_CASE , device=decoder_input_tokens.device) , (batch, seq_length) , )
__a = self.position_encoding(__SCREAMING_SNAKE_CASE)
__a = self.continuous_inputs_projection(__SCREAMING_SNAKE_CASE)
inputs += position_encodings
__a = self.dropout(__SCREAMING_SNAKE_CASE)
# decoder: No padding present.
__a = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
__a = [(x, self.encoder_decoder_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__a = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1)
__a = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1)
for lyr in self.decoders:
__a = lyr(
__SCREAMING_SNAKE_CASE , conditioning_emb=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )[0]
__a = self.decoder_norm(__SCREAMING_SNAKE_CASE)
__a = self.post_dropout(__SCREAMING_SNAKE_CASE)
__a = self.spec_out(__SCREAMING_SNAKE_CASE)
return spec_out
class DecoderLayer( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-6):
'''simple docstring'''
super().__init__()
__a = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__SCREAMING_SNAKE_CASE , d_kv=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__SCREAMING_SNAKE_CASE , d_kv=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE , layer_norm_epsilon=__SCREAMING_SNAKE_CASE , ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__SCREAMING_SNAKE_CASE , d_ff=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE , layer_norm_epsilon=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a = self.layer[0](
__SCREAMING_SNAKE_CASE , conditioning_emb=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , )
if encoder_hidden_states is not None:
__a = torch.where(encoder_attention_mask > 0 , 0 , -1E10).to(
encoder_hidden_states.dtype)
__a = self.layer[1](
__SCREAMING_SNAKE_CASE , key_value_states=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , )
# Apply Film Conditional Feed Forward layer
__a = self.layer[-1](__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
super().__init__()
__a = TaLayerNorm(__SCREAMING_SNAKE_CASE)
__a = TaFiLMLayer(in_features=d_model * 4 , out_features=__SCREAMING_SNAKE_CASE)
__a = Attention(query_dim=__SCREAMING_SNAKE_CASE , heads=__SCREAMING_SNAKE_CASE , dim_head=__SCREAMING_SNAKE_CASE , out_bias=__SCREAMING_SNAKE_CASE , scale_qk=__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , ):
'''simple docstring'''
__a = self.layer_norm(__SCREAMING_SNAKE_CASE)
if conditioning_emb is not None:
__a = self.FiLMLayer(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Self-attention block
__a = self.attention(__SCREAMING_SNAKE_CASE)
__a = hidden_states + self.dropout(__SCREAMING_SNAKE_CASE)
return hidden_states
class TaLayerCrossAttention( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
super().__init__()
__a = Attention(query_dim=__SCREAMING_SNAKE_CASE , heads=__SCREAMING_SNAKE_CASE , dim_head=__SCREAMING_SNAKE_CASE , out_bias=__SCREAMING_SNAKE_CASE , scale_qk=__SCREAMING_SNAKE_CASE)
__a = TaLayerNorm(__SCREAMING_SNAKE_CASE , eps=__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a = self.layer_norm(__SCREAMING_SNAKE_CASE)
__a = self.attention(
__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , attention_mask=attention_mask.squeeze(1) , )
__a = hidden_states + self.dropout(__SCREAMING_SNAKE_CASE)
return layer_output
class TaLayerFFCond( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon):
        '''simple docstring'''
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward( self , hidden_states , conditioning_emb=None):
        '''simple docstring'''
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate):
        '''simple docstring'''
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False)
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False)
        self.wo = nn.Linear(d_ff , d_model , bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward( self , hidden_states):
        '''simple docstring'''
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6):
        '''simple docstring'''
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward( self , hidden_states):
        '''simple docstring'''
        variance = hidden_states.to(torch.float32).pow(2).mean(-1 , keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input : torch.Tensor):
        '''simple docstring'''
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input , 3.0))))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features):
        '''simple docstring'''
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False)
    def forward( self , x , conditioning_emb):
        '''simple docstring'''
        emb = self.scale_bias(conditioning_emb)
        scale , shift = torch.chunk(emb , 2 , -1)
        x = x * (1 + scale) + shift
        return x
| 131 |
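# --- Illustrative sketch --- The core conditioning trick in the row above is FiLM:
# a linear layer maps the conditioning embedding to a scale and a shift, applied as
# x * (1 + scale) + shift. Standalone version (shapes assumed for the demo):
import torch
import torch.nn as nn

class FilmSketch(nn.Module):
    def __init__(self, cond_dim, features):
        super().__init__()
        self.scale_bias = nn.Linear(cond_dim, features * 2, bias=False)
    def forward(self, x, conditioning):
        scale, shift = self.scale_bias(conditioning).chunk(2, dim=-1)
        return x * (1 + scale) + shift

film = FilmSketch(4, 3)
out = film(torch.randn(2, 3), torch.randn(2, 4))
assert out.shape == (2, 3)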
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__snake_case :int = logging.get_logger(__name__)
class _A :
    def __init__( self , question_encoder , generator):
        '''simple docstring'''
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory):
        '''simple docstring'''
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory , exist_ok=True)
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''')
        generator_path = os.path.join(save_directory , '''generator_tokenizer''')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        '''simple docstring'''
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('''config''' , None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''')
        return cls(question_encoder=question_encoder , generator=generator)
    def __call__( self , *args , **kwargs):
        '''simple docstring'''
        return self.current_tokenizer(*args , **kwargs)
    def batch_decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.generator.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.generator.decode(*args , **kwargs)
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts : List[str] , tgt_texts : Optional[List[str]] = None , max_length : Optional[int] = None , max_target_length : Optional[int] = None , padding : str = "longest" , return_tensors : str = None , truncation : bool = True , **kwargs , ):
        '''simple docstring'''
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
| 131 | 1 |
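# --- Illustrative note --- The wrapper above (RagTokenizer upstream) keeps two
# independent tokenizers in sibling subfolders, e.g. after save_pretrained(d):
#
#   d/question_encoder_tokenizer/   <- question-encoder files
#   d/generator_tokenizer/          <- generator files
#
# Hypothetical usage (the model id is shown only as an example):
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   batch = tok(["who wrote hamlet?"], text_target=["shakespeare"], return_tensors="pt")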
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self , size : int) ->None:
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__( self , vertex : int) ->Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex])
    @property
    def size( self ) ->int:
        '''simple docstring'''
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int) ->None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''')
        self._graph[from_vertex].append(Edge(to_vertex , weight))
    def get_shortest_path( self , start_vertex : int , finish_vertex : int) ->int | None:
        '''simple docstring'''
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
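# --- Illustrative sketch --- 0-1 BFS: with edge weights restricted to {0, 1}, a
# deque replaces Dijkstra's priority queue; weight-0 edges go to the front and
# weight-1 edges to the back. Standalone adjacency-list version:
from collections import deque

def zero_one_bfs(adj, start):
    # adj[u] is a list of (v, w) pairs with w in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                (dq.appendleft if w == 0 else dq.append)(v)
    return dist

assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 0)], []], 0) == [0, 0, 0]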
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_yolos_config( yolos_name ):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def __UpperCamelCase ( _A , _A , _A = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase_ = in_proj_bias[-config.hidden_size :]
def rename_key( name ):
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        expected_slice_boxes = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        expected_slice_boxes = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        expected_slice_boxes = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        expected_slice_boxes = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        expected_slice_boxes = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
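# --- Illustrative sketch --- The conversion above splits timm's fused qkv
# projection of shape (3 * hidden, hidden) into separate query/key/value
# matrices, in that order. Standalone demonstration:
import numpy as np

def split_qkv(qkv_weight, hidden):
    q = qkv_weight[:hidden, :]
    k = qkv_weight[hidden : hidden * 2, :]
    v = qkv_weight[-hidden:, :]
    return q, k, v

w = np.arange(24).reshape(6, 2)          # hidden = 2 -> fused shape (6, 2)
q, k, v = split_qkv(w, 2)
assert (np.vstack([q, k, v]) == w).all()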
from __future__ import annotations
class Matrix :
    def __init__( self , rows : list[list[int]] ):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row : int , column : int ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row : int , column : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse" )
        return self.adjugate() * (1 / determinant)
def __repr__( self : Tuple ):
return str(self.rows )
def __str__( self : List[str] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(_a ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row( self , row : list[int] , position : int | None = None ):
        type_error = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column : list[int] , position : int | None = None ):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
def __eq__( self : Union[str, Any] , _a : object ):
        if not isinstance(_a , Matrix ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Optional[Any] , _a : object ):
return not self == other
def __neg__( self : List[str] ):
return self * -1
def __add__( self : int , _a : Matrix ):
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[int] , _a : Matrix ):
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple , _a : Matrix | int | float ):
if isinstance(_a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(_a , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__( self : Optional[Any] , _a : int ):
        if not isinstance(_a , int ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row : list[int] , column : list[int] ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
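# --- Illustrative sketch --- The determinant method above recurses via cofactor
# expansion along the first row; the same idea as a standalone function:
def det(rows):
    if len(rows) == 1:
        return rows[0][0]
    total = 0
    for c, value in enumerate(rows[0]):
        minor = [row[:c] + row[c + 1:] for row in rows[1:]]
        total += ((-1) ** c) * value * det(minor)
    return total

assert det([[1, 2], [3, 4]]) == -2
assert det([[2, 0, 0], [0, 3, 0], [0, 0, 4]]) == 24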
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCamelCase__ ( PretrainedConfig ):
    model_type = '''mobilenet_v1'''
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class lowerCamelCase__ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self ):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self ):
        return 1e-4
| 42 | 0 |
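# --- Illustrative sketch --- depth_multiplier and min_depth above follow the usual
# MobileNet convention: scale each layer's channel count, then round to a multiple
# of 8 without going below min_depth. This is the common rule, assumed rather than
# read from this config file:
def scaled_channels(channels, depth_multiplier=1.0, min_depth=8, divisor=8):
    value = max(min_depth, int(channels * depth_multiplier + divisor / 2) // divisor * divisor)
    if value < 0.9 * channels * depth_multiplier:  # avoid shrinking by more than 10%
        value += divisor
    return value

assert scaled_channels(64, 1.0) == 64
assert scaled_channels(64, 0.75) == 48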
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Union[str, Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Any = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 284 |
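# --- Illustrative sketch --- The two helpers above implement the standard BERT
# layout: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] with segment ids
# 0/1 for a pair. Standalone version (101/102 are typical BERT ids, assumed here):
def build_inputs(ids_a, ids_b=None, cls=101, sep=102):
    out = [cls] + ids_a + [sep]
    types = [0] * len(out)
    if ids_b is not None:
        out += ids_b + [sep]
        types += [1] * (len(ids_b) + 1)
    return out, types

assert build_inputs([7, 8]) == ([101, 7, 8, 102], [0, 0, 0, 0])
assert build_inputs([7], [9]) == ([101, 7, 102, 9, 102], [0, 0, 0, 1, 1])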
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( FeatureExtractionSavingTestMixin ):
"""simple docstring"""
    feature_extraction_class = None
    feat_extract_tester = None
@property
    def feat_extract_dict( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a ,'feature_size' ) )
self.assertTrue(hasattr(_a ,'sampling_rate' ) )
self.assertTrue(hasattr(_a ,'padding_value' ) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_tester.prepare_inputs_for_common()
_a : str = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
_a : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Union[str, Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
_a : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : str = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
_a : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = feat_extract.model_input_names[0]
_a : int = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
_a : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : Dict ,_a : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : Tuple ):
_a : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : Optional[Any] ,_a : Union[str, Any] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : int = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Tuple = BatchFeature({input_name: speech_inputs} )
_a : str = self.feat_extract_tester.seq_length_diff
_a : Dict = self.feat_extract_tester.max_seq_length + pad_diff
_a : Dict = self.feat_extract_tester.min_seq_length
_a : Optional[Any] = self.feat_extract_tester.batch_size
_a : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
_a : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowercase ( self : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : List[str] ):
_a : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : List[str] ,_a : List[str] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Any = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a : Optional[Any] = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a : Union[str, Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
self._check_truncation(numpify=_a )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_a )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Optional[int] = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
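# --- Added illustrative sketch (not part of the original test file) ---
# The `pad_to_multiple_of` assertions above rely on rounding a sequence length
# up to the nearest multiple of the padding unit. The helper name below is
# hypothetical; it only mirrors the arithmetic used in those checks.
def _round_up_to_multiple(length, multiple):
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple

assert _round_up_to_multiple(10, 12) == 12
assert _round_up_to_multiple(12, 12) == 12
assert _round_up_to_multiple(13, 12) == 24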
| 271 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''spiece.model'''}
a = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
a = {
'''AI-Sweden/gpt-sw3-126m''': 2_048,
'''AI-Sweden/gpt-sw3-350m''': 2_048,
'''AI-Sweden/gpt-sw3-1.6b''': 2_048,
'''AI-Sweden/gpt-sw3-6.7b''': 2_048,
'''AI-Sweden/gpt-sw3-20b''': 2_048,
}
class lowercase_ ( PreTrainedTokenizer ):
'''simple docstring'''
UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , vocab_file : Any , do_lower_case : Optional[Any]=False , remove_space : Tuple=False , keep_accents : Union[str, Any]=False , pad_token : List[Any]=None , unk_token : List[str]=None , eos_token : Union[str, Any]=None , bos_token : Tuple=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Union[str, Any] , ):
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    name_or_path = kwargs.get('name_or_path' )
    if name_or_path is None:
        logger.warning(
            'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
            ' if you are testing the model, this can safely be ignored' )
        name_or_path = 'None'
    # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
    eos_token = '<|endoftext|>' if eos_token is None else eos_token
    unk_token = '<unk>' if unk_token is None else unk_token
    if "gpt-sw3-7b" in name_or_path:
        pad_token = unk_token if pad_token is None else pad_token
        bos_token = eos_token if bos_token is None else bos_token
    else:
        pad_token = '<pad>' if pad_token is None else pad_token
        bos_token = '<s>' if bos_token is None else bos_token
    super().__init__(
        do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.do_lower_case = do_lower_case
    self.remove_space = remove_space
    self.keep_accents = keep_accents
    self.vocab_file = vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
# fmt: off
self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
self.non_printing_characters_re = re.compile(
    F'''[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__( self : Union[str, Any] ):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__( self : List[Any] , d ):
    self.__dict__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase_ ( self : str ):
return len(self.sp_model )
def lowerCAmelCase_ ( self : Optional[int] , text : str ):
    text = self.non_printing_characters_re.sub('' , text )
    # Normalize whitespaces
    text = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
    # NFC Unicode normalization
    text = unicodedata.normalize('NFC' , text )
    return text
def lowerCAmelCase_ ( self : Optional[int] , text : str , **kwargs : Dict ):
    text = self.preprocess_text(text )
    return self.sp_model.encode(text , out_type=str )
def lowerCAmelCase_ ( self : int , token : str ):
    return self.sp_model.PieceToId(token )
def lowerCAmelCase_ ( self : Optional[Any] , index : int ):
    return self.sp_model.IdToPiece(index )
@staticmethod
def lowerCAmelCase_ ( out_string : str ):
return out_string
def lowerCAmelCase_ ( self : Union[str, Any] , tokens : List[str] ):
    current_sub_tokens = []
    out_string = ''
    prev_is_special = False
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
            if not prev_is_special:
                out_string += " "
            out_string += self.sp_model.decode(current_sub_tokens ) + token
            prev_is_special = True
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token )
            prev_is_special = False
    out_string += self.sp_model.decode(current_sub_tokens )
    return out_string
def lowerCAmelCase_ ( self : Dict ):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
    if not os.path.isdir(save_directory ):
        logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
def lowerCAmelCase_ ( self : Any , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ):
    if isinstance(text , str ):
        text = self.preprocess_text(text )
        token_ids = self.sp_model.encode(text )
    else:
        text = [self.preprocess_text(t ) for t in text]
        token_ids = self.sp_model.encode(text )
    if return_tensors is True or return_tensors == "pt":
        token_ids = torch.tensor(token_ids )
    return token_ids
def lowerCAmelCase_ ( self : Union[str, Any] , token_ids : Union[int, List[int]] ):
    return self.sp_model.decode(token_ids )
def lowerCAmelCase_ ( self : Tuple , conversation : "Conversation" ):
    text = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
    prompt = (
        F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(text ) + F'''{self.bos_token}Bot:'''
    )
    return self.encode(text=prompt )
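# --- Added illustrative sketch (not part of the original tokenizer) ---
# The `preprocess_text` step above follows a common recipe: drop non-printing
# control characters, map exotic whitespace code points to a plain space, and
# finish with NFC Unicode normalization. The simplified regex below is a
# hypothetical stand-in for the tokenizer's own pattern.
import re
import unicodedata

_controls = re.compile("[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)))) + "]")

def normalize_text(text: str) -> str:
    text = _controls.sub("", text)             # remove control characters
    text = re.sub(r"\s", " ", text)            # collapse whitespace variants to ' '
    return unicodedata.normalize("NFC", text)  # compose e.g. 'e' + U+0301 into 'é'

print(normalize_text("caf\u0065\u0301\u00a0au lait"))  # -> 'café au lait'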
| 353 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode : bool , use_xla : bool ):
'''simple docstring'''
def run_func(func : Any ):
    @wraps(func )
    def run_in_eager_mode(*args , **kwargs ):
        return func(*args , **kwargs )
    @wraps(func )
    @tf.function(experimental_compile=use_xla )
    def run_in_graph_mode(*args , **kwargs ):
        return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
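# --- Added illustrative sketch (not part of the original module) ---
# The decorator factory above switches one callable between eager execution
# and a compiled `tf.function` graph (optionally XLA-compiled). A minimal
# stand-alone version of that switch, guarded on TensorFlow being available
# and assuming the TF 2.x `experimental_compile` flag used in this file:
if is_tf_available():
    def _square(x):
        return x * x

    eager_square = _square                                           # runs eagerly
    graph_square = tf.function(_square, experimental_compile=True)   # XLA graph
    # eager_square(tf.constant(3.0)) and graph_square(tf.constant(3.0))
    # should both evaluate to 9.0.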
def random_input_ids( batch_size : int , sequence_length : int , vocab_size : int ) -> ["tf.Tensor"]:
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class lowercase_ ( Benchmark ):
'''simple docstring'''
UpperCAmelCase : TensorFlowBenchmarkArguments
UpperCAmelCase : PretrainedConfig
UpperCAmelCase : str = "TensorFlow"
@property
def lowerCAmelCase_ ( self : str ):
return tf.__version__
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
# initialize GPU on separate process
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_speed(_inference )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_speed(_train )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase )
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_memory(_inference )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase )
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_memory(_train )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.config_dict[model_name]
if self.args.fp16:
raise NotImplementedError('Mixed precision is currently not supported.' )
_A = (
hasattr(_UpperCAmelCase , 'architectures' )
and isinstance(config.architectures , _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('transformers' , fromlist=[model_class] )
_A = getattr(_UpperCAmelCase , _UpperCAmelCase )
_A = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_A = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size' ) else config.encoder.vocab_size
_A = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , training=_UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_UpperCAmelCase , training=_UpperCAmelCase )
_A = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fp16:
raise NotImplementedError('Mixed precision is currently not supported.' )
_A = (
hasattr(_UpperCAmelCase , 'architectures' )
and isinstance(config.architectures , _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('transformers' , fromlist=[model_class] )
_A = getattr(_UpperCAmelCase , _UpperCAmelCase )
_A = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_A = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size' ) else config.encoder.vocab_size
_A = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_A = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )[0]
_A = tf.gradients(_UpperCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )[0]
_A = tf.gradients(_UpperCAmelCase , model.trainable_variables )
return gradients
_A = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : int ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(_UpperCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_A = timeit.repeat(
_UpperCAmelCase , repeat=self.args.repeat , number=10 , )
return min(_UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Callable[[], None] ):
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
_A = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
_A = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
_A = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_A = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase )
_A = meminfo.used
_A = Memory(_UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
_A = None
else:
_A = measure_peak_memory_cpu(_UpperCAmelCase )
_A = Memory(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_A = stop_memory_tracing(_UpperCAmelCase )
if memory is None:
_A = summary.total
else:
_A = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 271 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
section_num_failed = 0
with open(log, 'r') as f:
for line in f:
line = json.loads(line)
if line.get('nodeid', '') != "":
test = line['nodeid']
if line.get('duration', None) is not None:
a : Optional[Any] = F'''{line["duration"]:.4f}'''
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
failed = []
log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
failed_table = []
filesafailed = {}
for test in failed_tests:
data = test[0].split('::')
data[0] = data[0].split('/')[-1]
if data[0] not in filesafailed:
filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
files = [test[0] for test in failed_table]
individual_files = list(set(files))
# Count number of instances in failed_tests
table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
err = 'Too many failed tests, please see the full report in the Action results.'
offset = len(err) + 10
message = message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
message = 'No failed tests! 🤗'
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
test_class = row[0]
else:
row[0] = ''
payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
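# --- Added illustrative sketch (not part of the original script) ---
# The truncation above works around Slack's ~3000-character limit for a
# section block: over-long reports are cut and a short notice replaces the
# tail. The same logic as a small reusable helper (name is hypothetical):
def truncate_for_slack(message, limit=3_000):
    notice = "Too many failed tests, please see the full report in the Action results."
    if len(message) <= limit:
        return message
    offset = len(notice) + 10  # headroom for the separator added below
    return message[: limit - offset] + f"\n...\n```\n{notice}"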
| 147 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : List[str] = logging.get_logger(__name__)
a : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
a : int = {'facebook/blenderbot-3B': 128}
class _a ( PreTrainedTokenizerFast ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
A = BlenderbotTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="replace", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Dict:
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, errors=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, trim_offsets=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCAmelCase_: int = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop("""type""" ) )
UpperCAmelCase_: Union[str, Any] = add_prefix_space
UpperCAmelCase_: List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = add_prefix_space
UpperCAmelCase_: Optional[Any] = """post_processor"""
UpperCAmelCase_: List[str] = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
UpperCAmelCase_: Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_: Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
UpperCAmelCase_: int = tuple(state["""cls"""] )
UpperCAmelCase_: int = False
if state.get("""add_prefix_space""", SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
UpperCAmelCase_: Any = add_prefix_space
UpperCAmelCase_: str = True
if state.get("""trim_offsets""", SCREAMING_SNAKE_CASE_ ) != trim_offsets:
UpperCAmelCase_: Tuple = trim_offsets
UpperCAmelCase_: Tuple = True
if changes_to_apply:
UpperCAmelCase_: str = getattr(SCREAMING_SNAKE_CASE_, state.pop("""type""" ) )
UpperCAmelCase_: str = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __snake_case (self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case (self, value ):
    value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
    self._mask_token = value
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCAmelCase_: List[str] = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> BatchEncoding:
UpperCAmelCase_: List[Any] = kwargs.get("""is_split_into_words""", SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, save_directory, filename_prefix = None ) -> Tuple[str]:
    files = self._tokenizer.model.save(save_directory, name=filename_prefix )
    return tuple(files )
def __snake_case (self, token_ids_a, token_ids_b = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def __snake_case (self, token_ids_a, token_ids_b = None ) -> List[int]:
    return token_ids_a + [self.eos_token_id]
def __snake_case (self, conversation: "Conversation" ) -> List[int]:
    inputs = []
    for is_user, text in conversation.iter_texts():
        if is_user:
            # We need to space prefix as it's being done within blenderbot
            inputs.append(""" """ + text )
        else:
            # Generated responses should contain them already.
            inputs.append(text )
    full_string = """ """.join(inputs )
    input_ids = self.encode(full_string )
    if len(input_ids ) > self.model_max_length:
        input_ids = input_ids[-self.model_max_length :]
        logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
    return input_ids
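# --- Added illustrative sketch (not part of the original tokenizer) ---
# The conversation builder above prefixes user turns with a space, joins all
# turns, encodes them, and keeps only the most recent `model_max_length` ids.
# The toy `encode` below is hypothetical; it just maps characters to ints.
def build_conversation_ids(turns, encode, model_max_length=8):
    inputs = [(" " + text) if is_user else text for is_user, text in turns]
    ids = encode(" ".join(inputs))
    return ids[-model_max_length:] if len(ids) > model_max_length else ids

def toy_encode(s):
    return [ord(c) for c in s]

print(build_conversation_ids([(True, "hi"), (False, "hello there")], toy_encode))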
| 147 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: Tuple ={
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Dict =[
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Tuple =[
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_: int ={
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: List[str] =['MaskFormerFeatureExtractor']
SCREAMING_SNAKE_CASE_: Union[str, Any] =['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Dict =[
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
SCREAMING_SNAKE_CASE_: List[str] =[
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure)
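# --- Added illustrative sketch (not part of the original module) ---
# The try/except blocks above implement optional-dependency gating: a symbol
# list is only registered when its backend imports cleanly, so importing the
# package never fails because an extra (torch, vision, ...) is missing. The
# helper below is a hypothetical, simplified version of that probe.
import importlib.util

def symbols_if_available(module_name, symbols):
    return symbols if importlib.util.find_spec(module_name) is not None else []

_example_structure = {"configuration": ["Config"]}
_example_structure["modeling"] = symbols_if_available("torch", ["Model"])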
| 106 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
def __init__( self : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any=13 , _SCREAMING_SNAKE_CASE : Any=7 , _SCREAMING_SNAKE_CASE : str=True , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : Tuple=99 , _SCREAMING_SNAKE_CASE : Optional[int]=32 , _SCREAMING_SNAKE_CASE : Optional[Any]=5 , _SCREAMING_SNAKE_CASE : Union[str, Any]=4 , _SCREAMING_SNAKE_CASE : int=37 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE : str=20 , _SCREAMING_SNAKE_CASE : List[Any]=2 , _SCREAMING_SNAKE_CASE : Optional[Any]=1 , _SCREAMING_SNAKE_CASE : Any=0 , )-> Optional[Any]:
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : Any = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Optional[int] = intermediate_size
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Tuple = eos_token_id
lowerCAmelCase__ : Any = pad_token_id
lowerCAmelCase__ : Dict = bos_token_id
def UpperCAmelCase__( self : int )-> Optional[int]:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase__ : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase__ : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase__ : Any = prepare_pegasus_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, inputs_dict
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] )-> List[Any]:
lowerCAmelCase__ : Union[str, Any] = 20
lowerCAmelCase__ : Any = model_class_name(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase__ , lowerCAmelCase__ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase__ : List[str] = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase__ : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ : Any = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase__ : Dict = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : List[str] = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any )-> Dict:
lowerCAmelCase__ : Any = 20
lowerCAmelCase__ : Optional[int] = model_class_name(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase__ , lowerCAmelCase__ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase__ : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase__ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Optional[int] = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
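# --- Added illustrative sketch (not part of the original test file) ---
# The mask construction above marks every non-pad position with 1 and always
# attends to the first decoder position. A concrete example with a made-up
# pad id of 0, guarded on the numpy import above:
if is_flax_available():
    _example_ids = np.array([[5, 7, 0, 0], [3, 0, 0, 0]])
    _example_mask = np.not_equal(_example_ids, 0).astype(np.int8)
    # _example_mask == [[1, 1, 0, 0], [1, 0, 0, 0]]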
@require_flax
class _a ( FlaxModelTesterMixin , unittest.TestCase):
all_model_classes = (
    (
        FlaxPegasusForConditionalGeneration,
        FlaxPegasusModel,
    )
    if is_flax_available()
    else ()
)
all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def UpperCAmelCase__( self : Tuple )-> List[str]:
lowerCAmelCase__ : Tuple = FlaxPegasusModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] )-> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase__( self : List[str] )-> str:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] )-> Any:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : str )-> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(input_ids : List[str] , attention_mask : List[Any]=None , **kwargs : str ):
    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase__ : Union[str, Any] = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase__ : List[Any] = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__( self : Union[str, Any] )-> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase__ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids : Any , decoder_attention_mask : List[str] , encoder_outputs : List[Any] ):
    return model.decode(
        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase__ : Dict = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase__ : Union[str, Any] = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__( self : Union[str, Any] )-> Optional[int]:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Any = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = np.ones((1, 1) )
lowerCAmelCase__ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__( self : int )-> Tuple:
lowerCAmelCase__ : int = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase__ : int = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase__ : str = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase__ : Dict = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''np''' , truncation=_SCREAMING_SNAKE_CASE , max_length=512 , padding=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = model.generate(**_SCREAMING_SNAKE_CASE , num_beams=2 ).sequences
lowerCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
assert tgt_text == decoded
| 131 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
_a : Any = VOCAB_FILES_NAMES
_a : List[str] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
_a : int = VOCAB_FILES_NAMES
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
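# --- Added illustrative sketch (not part of the original module) ---
# A usage example for the span-prediction record defined above, assuming the
# namedtuple is bound to its declared name `DPRSpanPrediction`; the values
# are made up for illustration.
# span = DPRSpanPrediction(
#     span_score=3.2, relevance_score=0.9, doc_id=0, start_index=4, end_index=7, text="paris"
# )
# span.text, span.span_score  # fields are accessed by name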
lowerCamelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowercase)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # keep only spans that do not overlap with an already chosen span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals

@add_end_docstrings(_lowercase)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
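

# --- A usage sketch, not part of the upstream module. It mirrors the example from the
# DPR documentation and assumes network access, the `facebook/dpr-reader-single-nq-base`
# checkpoint, and the companion `DPRReader` model class from `transformers`.
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # Turn the reader logits back into ranked answer spans.
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)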
| 131 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
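

# A minimal sketch, not part of the upstream test file; all names are hypothetical.
# It shows the single-label scoring rule these tests exercise: each candidate label is
# turned into an NLI hypothesis via the template, and the per-label entailment logits
# are softmaxed, which is why the asserted scores always sum to 1.0.
def sketch_zero_shot_scores(entailment_logits):
    import math

    exps = [math.exp(logit) for logit in entailment_logits]
    total = sum(exps)
    return [e / total for e in exps]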
| 207 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        # Prints the choice at the given index, with an arrow next to the active one.
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        # Should not be called directly; moves the cursor up or down.
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        # Start the menu and return the selected index.
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
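

# A usage sketch, not part of the upstream module: run in a real terminal, this renders
# the menu and returns the index of the selected entry.
if __name__ == "__main__":
    menu = BulletMenu("Pick a framework:", ["pytorch", "tensorflow", "jax"])
    choice = menu.run(default_choice=0)
    print(f"You picked option #{choice}")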
| 207 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 317 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Return the number of perimeters p <= limit that describe exactly one
    integer-sided right triangle (Project Euler problem 75).

    Euclid's formula generates every primitive Pythagorean triple from coprime
    m > n of opposite parity as (m^2 - n^2, 2mn, m^2 + n^2), whose perimeter is
    2m(m + n); every other triple is an integer multiple of a primitive one.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # step of 2 keeps m and n of opposite parity
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue  # not coprime, so not a primitive triple
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # count the primitive perimeter and all of its multiples up to the limit
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
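

# A small sanity check, added here: 12 is the perimeter of exactly one integer right
# triangle, (3, 4, 5), so it is the first perimeter counted by `solution`.
assert solution(12) == 1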
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()

@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
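

# A minimal sketch, not part of the upstream file; names are hypothetical. It shows how
# the per-layer highway entropies returned above are typically used at inference in
# DeeBERT-style early exit: stop at the first layer whose prediction entropy falls
# below a confidence threshold.
def pick_exit_layer(highway_entropies, threshold=0.5):
    for layer, layer_entropy in enumerate(highway_entropies, start=1):
        if layer_entropy < threshold:  # confident enough to stop early
            return layer
    return len(highway_entropies)  # fall through to the final layer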
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
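

# A quick sketch, not part of the upstream tests; names are hypothetical. It captures
# the slice-comparison idiom used above: with seeded generators the output is
# deterministic, so a 3x3 corner of the image stands in for the whole array.
def slices_close(image, expected_slice, atol=1e-2):
    image_slice = image[0, -3:, -3:, -1]
    return np.abs(image_slice.flatten() - expected_slice).max() < atol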
| 14 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
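
# Example invocation, added for illustration; the script name and paths are placeholders:
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_script_args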
| 271 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """
    Import a module from the cache directory for dynamic modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
    inheriting from `DiffusionPipeline`.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Downloads a module from a local folder, a community pipeline on GitHub, or a repo on the Hub, caches it inside
    the dynamic modules cache, and returns its path there.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Extracts a class from a module file, present in a local folder or in a repository of a model.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
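

# A usage sketch, not part of the upstream module: fetching a community pipeline class
# by name. The "clip_guided_stable_diffusion" community file is assumed to exist for
# the resolved revision; any other community pipeline name works the same way.
if __name__ == "__main__":
    pipeline_class = get_class_from_dynamic_module(
        "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
    )
    print(pipeline_class.__name__)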
| 271 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline. Predicts the class of an audio when you provide an audio and a set of
    `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
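

# A usage sketch, not part of the upstream module: the `laion/clap-htsat-unfused`
# checkpoint is the one commonly paired with this pipeline, and "sample.wav" is a
# placeholder path to a local single-channel audio file.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    predictions = classifier("sample.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
    print(predictions)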
| 355 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-style mapping."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
    '--is_encoder_only', action='store_true', help='Whether the checkpoint is for an encoder-only model (no decoder).', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
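# Example invocation (illustrative file paths and script name, not part of the original):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_file /tmp/t5_config.json \
#       --pytorch_dump_path /tmp/t5_pytorch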
| 28 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    '''Computes WER/CER on the predictions and logs the results; optionally writes all outputs to text files.'''
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    '''Lowercases the text and strips characters/whitespace that were ignored during training.'''
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
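# Example (illustrative): normalize_text("Hello, World!\nBye") lowercases the text,
# strips the comma and exclamation mark via chars_to_ignore_regex, and folds the
# newline into a single space, yielding "hello world bye".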
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
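# Example invocation (illustrative model/dataset ids, not from the original script):
#   python eval.py --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_8_0 --config ab --split test --log_outputs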
if __name__ == "__main__":
A: List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
A: Union[str, Any] = parser.parse_args()
main(args)
| 109 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """LRU cache: the least-recently referred key is evicted first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # evict the least-recently used key (tail of the deque) when full
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
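    # Walkthrough of the assertion above: with capacity 4, re-referring "A" moves it
    # back to the front of the deque, so 2 becomes the least-recently-used key and
    # is evicted when 5 arrives, leaving [5, 4, 'A', 3].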
| 106 | 0 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
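# Quick examples (illustrative; the name `bin_to_octal` is chosen here, since the
# original function name is not recoverable from this snippet):
#   bin_to_octal("1111")   -> pads to "001111" -> "17"
#   bin_to_octal("101010") -> groups "101","010" -> "52"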
if __name__ == "__main__":
from doctest import testmod
testmod()
| 191 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
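    # The expected slice above covers the first three of the model's 1000 class
    # logits; atol=1e-4 leaves headroom for minor numerical differences across devices.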
| 191 | 1 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # within the strip, each point only needs to be compared to its 6 neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
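# Complexity note: the divide-and-conquer above runs in O(n log n), while the
# brute-force dis_between_closest_pair used for the <=3-point base case is O(n^2).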
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
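    # Extra illustrative check (assumed points, not from the original module):
    # two coincident points should give a distance of 0.0.
    print('Distance:', closest_pair_of_points([(0, 0), (0, 0), (1, 1)], 3))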
| 207 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1, lon1, lat2, lon2) -> float:
    # Compute the flattening of the ellipsoid and the reduced latitudes.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
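    # Illustrative sanity check (coordinates and expected value are assumptions,
    # not from the original module): San Francisco -> Yosemite should come out
    # to roughly 254 km.
    print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))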
| 207 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[Any] = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
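# Each MODEL_CLASSES entry maps a shortcut name to its config class, the TF 2.0
# model class(es), the PyTorch model class(es), and the pretrained archive map(s)
# that the two converter functions below unpack positionally.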
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase : Dict = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
    config = config_class.from_json_file(config_file)
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Any = True
print(F"Building TensorFlow model from configuration: {config}" )
    tf_model = model_class(config)
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase : int = cached_file(
_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(F"Max absolute difference between models outputs {diff}")
        assert diff <= 2E-2, F"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(F"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(_UpperCAmelCase, save_format="h5" )
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False, ):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
print("=" * 100 )
print(F" Converting model type {j}/{len(_UpperCAmelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase : Dict = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase : List[str] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(_UpperCAmelCase, _UpperCAmelCase ), start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
__UpperCAmelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
F" Converting checkpoint {i}/{len(_UpperCAmelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase : int = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
else:
__UpperCAmelCase : Tuple = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase : List[Any] = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
else:
__UpperCAmelCase : Optional[Any] = model_shortcut_name
if os.path.isfile(_UpperCAmelCase ):
__UpperCAmelCase : List[str] = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=_UpperCAmelCase, pytorch_checkpoint_path=_UpperCAmelCase, config_file=_UpperCAmelCase, tf_dump_path=os.path.join(_UpperCAmelCase, model_shortcut_name + "-tf_model.h5" ), compare_with_pt_model=_UpperCAmelCase, )
if remove_cached_files:
os.remove(_UpperCAmelCase )
os.remove(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowerCAmelCase__ : Dict = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
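# Example invocation (illustrative, converts a single model type from the AWS maps):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --tf_dump_path /tmp/tf_dump --compare_with_pt_model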
| 37 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCAmelCase__ : str = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
__UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (4, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Dict = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__UpperCAmelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
__UpperCAmelCase , __UpperCAmelCase : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
model_accelerate.to(UpperCAmelCase_ )
model_accelerate.eval()
__UpperCAmelCase : Optional[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCAmelCase : int = noise.to(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = model_accelerate(UpperCAmelCase_ , UpperCAmelCase_ )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__UpperCAmelCase , __UpperCAmelCase : str = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ , low_cpu_mem_usage=UpperCAmelCase_ )
model_normal_load.to(UpperCAmelCase_ )
model_normal_load.eval()
__UpperCAmelCase : Optional[Any] = model_normal_load(UpperCAmelCase_ , UpperCAmelCase_ )["sample"]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-3 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCAmelCase : Optional[int] = noise.to(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase_ )
with torch.no_grad():
__UpperCAmelCase : Dict = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
__UpperCAmelCase : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__UpperCAmelCase : int = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-3 ) )
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__UpperCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : int = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Any = self.dummy_input
__UpperCAmelCase : int = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = noise
__UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = 4
__UpperCAmelCase : Optional[int] = 3
__UpperCAmelCase : int = (256, 256)
__UpperCAmelCase : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
__UpperCAmelCase : Dict = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase_ )
with torch.no_grad():
__UpperCAmelCase : Tuple = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
__UpperCAmelCase : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__UpperCAmelCase : Tuple = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2 ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : Dict = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = 4
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : Union[str, Any] = (32, 32)
__UpperCAmelCase : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase_ )
with torch.no_grad():
__UpperCAmelCase : str = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
__UpperCAmelCase : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__UpperCAmelCase : Any = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2 ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# not required for this model
pass
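# Note: the NCSNpp-style variant above uses Fourier time embeddings and Skip
# up/down blocks (see its config), which is why its expected output slices are on
# a very different scale from the DDPM-style UNet tested first.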
| 37 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
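# Early-exit summary: every intermediate "highway" classifier contributes a loss
# during training (train_highway sums all but the final exit), while at inference
# the per-exit entropies are returned so a caller can stop at the first
# sufficiently confident layer.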
| 14 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
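# Example (illustrative): for TOP_LEVEL_TRY_IMPORT the parser reports only "os",
# because "bar" is imported inside a try/except ImportError block and is treated
# as an optional dependency.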
| 14 | 1 |
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
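# Examples (illustrative; the wrapper names above are conventional choices, since
# the originals are not recoverable from this snippet):
#   to_snake_case("Hello World", upper=False) -> "hello_world"
#   to_kebab_case("Hello World", upper=True)  -> "HELLO-WORLD"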
if __name__ == "__main__":
    __import__('doctest').testmod()
| 284 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def UpperCamelCase ( self ):
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : List[Any] = self.get_feature_extractor()
_snake_case : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
_snake_case : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : List[Any] = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : List[Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : List[str] = floats_list((3, 1_000) )
_snake_case : Union[str, Any] = feature_extractor(lowercase_ , return_tensors="np" )
_snake_case : Any = processor(audios=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : str = self.get_feature_extractor()
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : Dict = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : Any = "This is a test string"
_snake_case : Optional[Any] = processor(text=lowercase_ )
_snake_case : Optional[Any] = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_feature_extractor()
_snake_case : Dict = self.get_tokenizer()
_snake_case : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
_snake_case : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : List[Any] = processor.batch_decode(lowercase_ )
_snake_case : Optional[int] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[str] = self.get_feature_extractor()
_snake_case : str = self.get_tokenizer()
_snake_case : Optional[int] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 284 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :int = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig(PretrainedConfig):
    model_type = '''mobilenet_v1'''

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
| 49 |
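# Usage sketch for the two classes above (a minimal illustration, not from the original file):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.model_type                 # -> "mobilenet_v1"
#   onnx_config = MobileNetV1OnnxConfig(config)
#   list(onnx_config.inputs)          # -> ["pixel_values"]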
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
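# e.g. rename_key(state_dict, "input_proj.weight", "input_projection.weight") moves the
# tensor stored under the original DETR name to the Hugging Face name, mutating the dict in place.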
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
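# Note (added for clarity): the 256/512 slice boundaries reflect the model's hidden size of
# 256. Each fused in_proj matrix stacks the query, key and value projections, so the three
# equal chunks above are split out into separate q_proj / k_proj / v_proj tensors.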
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 0 |
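# Example invocation (script file name illustrative; the --checkpoint_url default already
# points at the detection checkpoint):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-structure-recognition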
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 155 |
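# A further sketch in the same spirit (not in the original suite; `tmp_path` is the standard
# pytest fixture): a file with several distinct top-level imports should report each module once.
def test_multiple_top_level_imports(tmp_path):
    file_path = os.path.join(tmp_path, "test_file.py")
    with open(file_path, "w") as f:
        f.write("import os\nimport re\n")
    assert sorted(get_imports(file_path)) == ["os", "re"]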
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 155 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs) | 191 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 191 | 1 |
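# Worked example (added; not in the original file): integrate y' = y with y(0) = 1 on [0, 1].
# Explicit Euler is first-order accurate, so the endpoint approaches e as step_size shrinks:
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   round(float(ys[-1]), 2)  # -> 2.7 (exact value: e ≈ 2.71828)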
'''simple docstring'''
__a = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 360 |
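# Quick usage sketch for the public API re-exported above (dataset name illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   ds[0]["text"][:80]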
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 43 | 0 |
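# Example invocation plus a minimal onnxruntime smoke test (script name, model id and the
# exact input feed are illustrative -- depending on how `return_dict` was folded during
# export, the session may expect only "latent_sample"):
#
#   python convert_vae_decoder_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14
#
#   import numpy as np, onnxruntime as ort
#   sess = ort.InferenceSession("./sd_onnx/vae_decoder/model.onnx")
#   latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
#   image = sess.run(None, {"latent_sample": latents})[0]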
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 37 |
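# Typical use through the high-level factory (model id and scores illustrative;
# "image-classification" is the task this pipeline class serves):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("cats.png", top_k=3)
#   # -> [{"score": 0.98, "label": "tabby, tabby cat"}, ...]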
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 37 | 1 |
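# Sketch of the intended call pattern (model class and file name illustrative; any diffusers
# model with a Flax `.msgpack` checkpoint follows the same flow):
#
#   from diffusers import UNet2DConditionModel
#   pt_model = UNet2DConditionModel(**unet_kwargs)   # randomly initialised PyTorch skeleton
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "unet/diffusion_flax_model.msgpack")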
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 371 |
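# Illustrative note: for the "multiple-choice" task the ONNX inputs gain a "choice" axis, so
# the exported graph is traced over (batch, choice, sequence) rather than (batch, sequence).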
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a pattern from REPLACE_PATTERNS."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc',
                'https://huggingface.co/docs/diffusers/model_doc',
            )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'

    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 186 | 0 |
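# Typical invocations (script path illustrative; flags as defined above):
#
#   python utils/release.py                 # release: drops the .dev0 suffix (or bumps the minor)
#   python utils/release.py --patch         # patch release: x.y.z -> x.y.(z+1); examples untouched
#   python utils/release.py --post_release  # open the next dev cycle: x.(y+1).0.dev0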
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 284 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 284 | 1 |
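# Illustrative usage of the tokenizer under test (requires the MeCab extras; checkpoint
# name as exercised in the tests above):
#
#   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#   tokenizer.tokenize("こんにちは、世界。")  # -> ["こんにちは", "、", "世界", "。"]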
import operator as op
__SCREAMING_SNAKE_CASE = """scaler.pt"""
__SCREAMING_SNAKE_CASE = """pytorch_model"""
__SCREAMING_SNAKE_CASE = """random_states"""
__SCREAMING_SNAKE_CASE = """optimizer"""
__SCREAMING_SNAKE_CASE = """scheduler"""
__SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
__SCREAMING_SNAKE_CASE = """pytorch_model.bin.index.json"""
__SCREAMING_SNAKE_CASE = """model.safetensors"""
__SCREAMING_SNAKE_CASE = """model.safetensors.index.json"""
__SCREAMING_SNAKE_CASE = """1.10.2"""
__SCREAMING_SNAKE_CASE = """py38"""
__SCREAMING_SNAKE_CASE = """4.17.0"""
__SCREAMING_SNAKE_CASE = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
__SCREAMING_SNAKE_CASE = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
__SCREAMING_SNAKE_CASE = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
__SCREAMING_SNAKE_CASE = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
__SCREAMING_SNAKE_CASE = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
__SCREAMING_SNAKE_CASE = """2.0.1"""
__SCREAMING_SNAKE_CASE = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
__SCREAMING_SNAKE_CASE = ["""default""", """reduce-overhead""", """max-autotune"""]
__SCREAMING_SNAKE_CASE = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
__SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
__SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""] | 371 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : Optional[Any] ) -> Tuple:
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
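    # Illustrative round trip (added; hedged — relies on the public PreTrainedTokenizer
    # API rather than the private helpers above):
    #   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    #   tok.decode(tok("Hello world").input_ids)  # -> "Hello world"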
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
 | 256 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig( PretrainedConfig ):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size: int = 25_0002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
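# Illustrative note (added, not in the original file): attribute_map lets legacy names
# alias canonical config attributes through PretrainedConfig's attribute machinery, e.g.
#   cfg = ErnieMConfig(classifier_dropout=0.1)
#   cfg.dropout      # -> 0.1, resolved through attribute_map
#   cfg.num_classes  # -> same value as cfg.num_labels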
| 155 |
"""simple docstring"""
def heaps(snake_case__ : list ) -> list:
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
    res = []
    def generate(k : int , arr : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
            if k % 2 == 0:  # k is even: swap the i-th element with the last
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first element with the last
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
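# Quick sanity check (illustrative addition, not in the original script): Heap's
# algorithm emits all n! orderings, producing each new permutation with one in-place swap.
assert len(heaps([1, 2, 3])) == 6  # 3! permutations
assert heaps([1, 2, 3])[0] == (1, 2, 3)  # the first tuple is the input order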
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 155 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer ):
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        # keras_nlp's BPE layer performs the actual byte-pair encoding
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        # Reuse the vocab/merges of an existing (slow) GPT-2 tokenizer
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        return cls(**config )
    def get_config( self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 368 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
def __a ( self ) -> Optional[Any]:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , _a ) -> Any:
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = "unwanted, running"
return input_text, output_text
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a )
lowerCAmelCase_ = "UNwant\u00E9d,running"
lowerCAmelCase_ = tokenizer.tokenize(_a )
lowerCAmelCase_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> Dict:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> Any:
lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCAmelCase_ = {}
for i, token in enumerate(_a ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Dict:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = ["的", "人", "有"]
lowerCAmelCase_ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
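# Illustrative sketch (added; not in the original test module): WordPiece tokenization
# is greedy longest-match-first over the vocab, which the expectations above encode.
_example_vocab = {tok: i for i, tok in enumerate(["[UNK]", "un", "##want", "##ed"])}
assert WordpieceTokenizer(vocab=_example_vocab, unk_token="[UNK]").tokenize("unwanted") == ["un", "##want", "##ed"]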
| 22 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature: float , molar_mass: float) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K")
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
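# The function above implements the Maxwell-Boltzmann RMS speed
#     v_rms = sqrt(3 * R * T / M)
# with R in J/(mol*K) and M in kg/mol. Worked example (illustrative): N2 at T = 300 K
# with M = 0.028 kg/mol gives v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ~= 517 m/s.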
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2; SI units are required with R in J/(mol*K)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
 | 76 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
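# Hedged usage sketch (added; the script name and paths below are illustrative placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin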
| 43 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__A = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 2 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
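# Illustrative usage (added; class names as restored above): the ONNX export config
# declares a single NCHW image input with dynamic batch/spatial axes.
#   onnx_config = BeitOnnxConfig(BeitConfig())
#   list(onnx_config.inputs)  # -> ["pixel_values"]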
| 2 | 1 |
"""simple docstring"""
def merge_sort(collection: list ) -> list:
    start, end = [], []
    while len(collection ) > 1:
        min_one, max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
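# Worked example (illustrative addition): merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5];
# each pass peels the current min onto `start` and the current max onto `end`,
# and `end` is reversed before the three pieces are concatenated.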
| 61 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Union[str, Any] = None
A_ : Any = 20
A_ : Any = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
A_ : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
A_ : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
A_ : List[str] = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
A_ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
A_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
A_ : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Any = None
A_ : List[Any] = 10
A_ : str = 2
# create ramp distribution
A_ : Any = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
A_ : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : Tuple = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A_ : Optional[int] = 5
A_ : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A_ : Optional[Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
A_ : Dict = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = None
A_ : Optional[Any] = 10
A_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A_ : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
A_ : str = FlaxTopPLogitsWarper(0.8 )
A_ : Optional[int] = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A_ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
A_ : Union[str, Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A_ : str = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
A_ : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A_ : str = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = 20
A_ : Union[str, Any] = 4
A_ : Optional[Any] = 0
A_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
A_ : int = ids_tensor((batch_size, 20) , vocab_size=20 )
A_ : List[Any] = 5
A_ : Optional[int] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
A_ : Tuple = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = 15
A_ : int = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = 20
A_ : Optional[int] = 4
A_ : Optional[int] = 0
A_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
A_ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
A_ : str = 1
A_ : List[str] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[str] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A_ : Optional[int] = 3
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Union[str, Any] = 20
A_ : str = 4
A_ : Dict = 0
A_ : Optional[int] = 5
A_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
A_ : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
A_ : Any = 4
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A_ : int = 3
A_ : Union[str, Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->str:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Union[str, Any] = 15
A_ : str = 2
A_ : int = 1
A_ : List[str] = 15
# dummy input_ids and scores
A_ : Tuple = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : int = input_ids.copy()
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = scores.copy()
# instantiate all dist processors
A_ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = 10
# no processor list
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[str] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
A_ : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : List[str] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Tuple = 15
A_ : List[str] = 2
A_ : List[str] = 1
A_ : Union[str, Any] = 15
# dummy input_ids and scores
A_ : Any = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = input_ids.copy()
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = scores.copy()
# instantiate all dist processors
A_ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[Any] = FlaxTopKLogitsWarper(3 )
A_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : str = 10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : Optional[int] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
A_ : Optional[int] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Dict = jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
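# Minimal sketch (added for illustration): a FlaxLogitsProcessorList composes its
# members left-to-right, equivalent to folding each processor over the scores, e.g.
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#   scores = processors(input_ids, scores, cur_len=cur_len)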
| 186 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=False ) -> Any:
if not batched:
UpperCAmelCase_ : Optional[Any] = image_inputs[0]
if isinstance(lowerCAmelCase_ , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Any = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase_ : Union[str, Any] = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase_ : Any = self.size["shortest_edge"]
UpperCAmelCase_ : List[Any] = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase_ : Optional[Any] = self.size["shortest_edge"]
UpperCAmelCase_ : Optional[Any] = self.size["shortest_edge"]
else:
UpperCAmelCase_ : Optional[int] = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : Union[str, Any] = max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[0] )[0]
UpperCAmelCase_ : Optional[int] = max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_rescale" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_pad" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
UpperCAmelCase_ : str = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ : int = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
# prepare image and target
UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCAmelCase_ : Optional[int] = json.loads(f.read() )
UpperCAmelCase_ : List[Any] = {"image_id": 39_769, "annotations": target}
# encode them
UpperCAmelCase_ : Optional[int] = DetaImageProcessor()
UpperCAmelCase_ : Optional[Any] = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase_ )
UpperCAmelCase_ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify area
UpperCAmelCase_ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase_ ) )
# verify boxes
UpperCAmelCase_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase_ , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ : List[Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase_ ) )
# verify is_crowd
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase_ ) )
# verify class_labels
UpperCAmelCase_ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase_ ) )
# verify orig_size
UpperCAmelCase_ : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase_ ) )
# verify size
UpperCAmelCase_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase_ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
# prepare image, target and masks_path
UpperCAmelCase_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCAmelCase_ : Optional[int] = json.loads(f.read() )
UpperCAmelCase_ : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
UpperCAmelCase_ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase_ : str = DetaImageProcessor(format="coco_panoptic" )
UpperCAmelCase_ : Union[str, Any] = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , masks_path=lowerCAmelCase_ , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify area
UpperCAmelCase_ : List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase_ ) )
# verify boxes
UpperCAmelCase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase_ )
UpperCAmelCase_ : str = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase_ , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase_ ) )
# verify is_crowd
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase_ ) )
# verify class_labels
UpperCAmelCase_ : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase_ ) )
# verify masks
UpperCAmelCase_ : int = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase_ )
# verify orig_size
UpperCAmelCase_ : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase_ ) )
# verify size
UpperCAmelCase_ : List[str] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase_ ) )
| 253 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase_ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 253 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
    """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
    """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """owlvit_text_model"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=4_9_4_0_8 , SCREAMING_SNAKE_CASE_ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE_ : Any=2_0_4_8 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=8 , SCREAMING_SNAKE_CASE_ : List[Any]=1_6 , SCREAMING_SNAKE_CASE_ : str="quick_gelu" , SCREAMING_SNAKE_CASE_ : Dict=1E-5 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=1.0 , SCREAMING_SNAKE_CASE_ : str=0 , SCREAMING_SNAKE_CASE_ : List[str]=4_9_4_0_6 , SCREAMING_SNAKE_CASE_ : Dict=4_9_4_0_7 , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : Optional[int] = hidden_size
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : Dict = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : List[Any] = max_position_embeddings
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : List[Any] = attention_dropout
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : Tuple = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
cls._set_token_in_kwargs(__UpperCamelCase )
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
lowerCAmelCase_ : Optional[int] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class OwlViTVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """owlvit_vision_model"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Any=7_6_8 , SCREAMING_SNAKE_CASE_ : str=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_2 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Tuple="quick_gelu" , SCREAMING_SNAKE_CASE_ : List[str]=1E-5 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=1.0 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
super().__init__(**__UpperCamelCase )
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : List[str] = image_size
lowerCAmelCase_ : Dict = patch_size
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : int = attention_dropout
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : int = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Tuple ):
cls._set_token_in_kwargs(__UpperCamelCase )
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
lowerCAmelCase_ : int = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class OwlViTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """owlvit"""
    is_composition = True
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Dict=5_1_2 , SCREAMING_SNAKE_CASE_ : str=2.65_92 , SCREAMING_SNAKE_CASE_ : Tuple=True , **SCREAMING_SNAKE_CASE_ : Dict , ):
super().__init__(**__UpperCamelCase )
if text_config is None:
lowerCAmelCase_ : List[Any] = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase_ : Optional[int] = {}
logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
lowerCAmelCase_ : Dict = OwlViTTextConfig(**__UpperCamelCase )
lowerCAmelCase_ : Any = OwlViTVisionConfig(**__UpperCamelCase )
lowerCAmelCase_ : List[str] = projection_dim
lowerCAmelCase_ : List[Any] = logit_scale_init_value
lowerCAmelCase_ : List[str] = return_dict
lowerCAmelCase_ : Union[str, Any] = 1.0
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
cls._set_token_in_kwargs(__UpperCamelCase )
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : str = text_config
lowerCAmelCase_ : int = vision_config
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : str = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Dict = self.text_config.to_dict()
lowerCAmelCase_ : Dict = self.vision_config.to_dict()
lowerCAmelCase_ : int = self.__class__.model_type
return output
class UpperCamelCase__ ( _lowercase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return 1E-4
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "ProcessorMixin" , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : Optional["TensorType"] = None , ):
lowerCAmelCase_ : List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , framework=__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__UpperCamelCase , framework=__UpperCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
return 1_4
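

# Usage sketch (editor's addition, not part of the upstream module): composing the
# full OwlViT config from the two sub-configs above. The override values are
# illustrative assumptions, not checkpoint defaults.
if __name__ == "__main__":
    config = OwlViTConfig(
        text_config={"max_position_embeddings": 16},
        vision_config={"image_size": 768, "patch_size": 32},
        projection_dim=512,
    )
    print(config.text_config.max_position_embeddings)  # 16
    print(config.to_dict()["model_type"])  # "owlvit"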
| 224 | """simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
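

# Usage sketch (editor's addition): padding a small batch of mono waveforms with
# the extractor above. The clips are random noise, used purely for illustration;
# 24 kHz matches the class default.
if __name__ == "__main__":
    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
    clips = [
        np.random.randn(24000).astype(np.float32),  # 1 s of audio
        np.random.randn(12000).astype(np.float32),  # 0.5 s, will be padded
    ]
    features = extractor(clips, sampling_rate=24000, padding=True, return_tensors="np")
    print(features["input_values"].shape)  # (2, 1, 24000)
    print(features["padding_mask"].shape)  # (2, 24000)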
| 256 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[dict, BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[dict, BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
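

# Usage sketch (editor's addition): exercising `pad` through a tiny concrete
# subclass. `ToyFeatureExtractor` is hypothetical; real extractors in the library
# derive from SequenceFeatureExtractor in exactly this way.
class ToyFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values"]


if __name__ == "__main__":
    toy = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = BatchFeature({"input_values": [np.ones(4, dtype=np.float32), np.ones(2, dtype=np.float32)]})
    padded = toy.pad(batch, padding="longest", return_tensors="np")
    print(padded["input_values"].shape)  # (2, 4) -- the short example is padded with 0.0
    print(padded["attention_mask"][1])   # [1 1 0 0]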
| 23 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
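

# Usage sketch (editor's addition): the helpers above operate on aligned arrays of
# predictions and gold labels; scikit-learn must be installed. The inputs are toy
# values chosen for illustration.
if __name__ == "__main__":
    import numpy as np  # not imported by this module itself

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))
    # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}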
| 23 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
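

# Usage sketch (editor's addition): the integration test above, condensed into a
# standalone snippet. Requires network access to download the checkpoint.
if __name__ == "__main__":
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])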
| 141 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
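
# Editor's note (sketch): every import group above follows the same guard pattern:
# probe for the optional backend and fall back to dummy placeholder objects that
# raise a helpful error when used. Reduced to its essentials, the pattern is:
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_pt_objects import *  # noqa F403 -- placeholders
#     else:
#         from .models import UNet2DConditionModel  # the real implementation
#
# This keeps `import diffusers` working even when optional extras are missing.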
| 22 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
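

# Editor's addition: a sequential odd-even transposition sort for comparison. It
# performs the same alternating compare-swap passes as the process-based version
# above and is convenient for sanity-checking its output.
def odd_even_transposition_sequential(arr):
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))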
if __name__ == "__main__":
main()
| 355 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_with_expected_configs(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
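

# Usage sketch (editor's addition): the inspected helpers can also be called
# directly. Requires network access to the Hugging Face Hub.
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # ['plain_text']
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']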
| 51 | 0 |