import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given indentation level, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap `key` (a function mapping an object to a string) to lowercase the result and drop underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` with the objects inside it sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; with `check_only=True`, only report changes."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
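
# A minimal usage sketch of the sorting helpers above. The module filename
# `custom_init_isort.py` is an assumption; adjust the import to wherever the
# script is saved.
from custom_init_isort import sort_objects, sort_objects_in_import

print(sort_objects(["load_model", "CONFIG_NAME", "ModelMixin"]))
# -> ['CONFIG_NAME', 'ModelMixin', 'load_model']  (constants, then classes, then functions)
print(sort_objects_in_import('    "models": ["UNet2D", "AutoencoderKL"],'))
# -> '    "models": ["AutoencoderKL", "UNet2D"],'
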
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__A : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any:
# load the pre-trained checkpoints
__A : List[str] = torch.load(__snake_case )
__A : Dict = WavLMConfigOrig(checkpoint['cfg'] )
__A : Optional[int] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__A : List[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
__A : Dict = WavLMConfig()
__A : Optional[Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
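
# A self-contained sketch of the `*`-wildcard substitution the conversion above
# relies on: the layer index is recovered from the fairseq parameter name and
# spliced into the Hugging Face key. `_DEMO_MAPPING` is a one-entry stand-in
# for the full MAPPING dict.
from typing import Optional

_DEMO_MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def map_fairseq_name(name: str) -> Optional[str]:
    for key, mapped_key in _DEMO_MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return None


print(map_fairseq_name("encoder.layers.3.self_attn.k_proj.weight"))
# -> encoder.layers.3.attention.k_proj
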
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
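
# Hypothetical programmatic invocation of the converter above (all paths are
# placeholders; note that load_tf_weights_in_t5 mutates the model in place):
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./t5_tf_checkpoint/model.ckpt",
    config_file="./t5_tf_checkpoint/config.json",
    pytorch_dump_path="./t5-pytorch",
)
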
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = '\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
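
# Usage matching the docstring example above (requires `pip install jiwer`):
import datasets

cer = datasets.load_metric("cer")
cer_score = cer.compute(
    predictions=["this is the prediction", "there is an other sample"],
    references=["this is the reference", "there is another one"],
)
print(cer_score)  # 0.34146341463414637
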
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
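
# A minimal usage sketch; RoFormerConfig/RoFormerModel are the public names in
# transformers, and the defaults mirror the class above:
from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(rotary_value=True)  # also apply rotary embeddings to the value projection
model = RoFormerModel(config)
print(config.max_position_embeddings)  # 1536
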
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
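
# Preferred replacement per the deprecation warning above (the checkpoint name
# is illustrative):
from transformers import PoolFormerImageProcessor

image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
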
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase ( a__ ):
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ):
"""simple docstring"""
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
_SCREAMING_SNAKE_CASE : str = eval_examples
_SCREAMING_SNAKE_CASE : List[str] = post_process_function
def __SCREAMING_SNAKE_CASE ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = "eval" ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE : int = self.get_eval_dataloader(lowerCamelCase_ )
_SCREAMING_SNAKE_CASE : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_SCREAMING_SNAKE_CASE : List[Any] = time.time()
try:
_SCREAMING_SNAKE_CASE : str = eval_loop(
lowerCamelCase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
_SCREAMING_SNAKE_CASE : Optional[Any] = compute_metrics
_SCREAMING_SNAKE_CASE : int = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_SCREAMING_SNAKE_CASE : Optional[int] = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions )
_SCREAMING_SNAKE_CASE : str = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_SCREAMING_SNAKE_CASE : Any = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
else:
_SCREAMING_SNAKE_CASE : Dict = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ )
return metrics
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_test_dataloader(lowerCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE : Tuple = self.compute_metrics
_SCREAMING_SNAKE_CASE : Any = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_SCREAMING_SNAKE_CASE : Any = time.time()
try:
_SCREAMING_SNAKE_CASE : List[Any] = eval_loop(
lowerCamelCase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
_SCREAMING_SNAKE_CASE : List[Any] = compute_metrics
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE : Optional[int] = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions , "predict" )
_SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_SCREAMING_SNAKE_CASE : Optional[Any] = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
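
# A sketch of how a question-answering trainer subclass like the one above is
# typically wired up (every name below is a placeholder, including the class
# name; post_process_function turns raw start/end logits into final answers
# before compute_metrics runs):
trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
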
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowercase_ : Tuple = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _lowerCAmelCase ( lowerCamelCase__ : Dict, lowerCamelCase__ : int=None, lowerCamelCase__ : Any=None, lowerCamelCase__ : Any=None ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = True
while ask_again:
_SCREAMING_SNAKE_CASE : List[str] = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowerCAmelCase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict=[], lowerCamelCase__ : Optional[int]=None, lowerCamelCase__ : str=0 ) -> str:
_SCREAMING_SNAKE_CASE : int = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowerCAmelCase ( lowerCamelCase__ : Optional[int] ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowerCAmelCase ( lowerCamelCase__ : Optional[Any] ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowerCAmelCase ( lowerCamelCase__ : int ) -> Dict:
_SCREAMING_SNAKE_CASE : int = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowerCAmelCase ( lowerCamelCase__ : List[Any] ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowerCAmelCase ( lowerCamelCase__ : List[Any] ) -> Optional[Any]:
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = super()._format_usage(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
_SCREAMING_SNAKE_CASE : Any = usage.replace("<command> [<args>] " , "" )
return usage
| 295 | 0 |
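
# Illustrative interactive flow built from the helpers above (prompt wording is
# an assumption; running this opens a terminal menu and a text prompt):
mixed_precision = _ask_options(
    "Do you wish to use mixed precision?",
    ["no", "fp16", "bf16", "fp8"],
    _convert_mixed_precision,
)
use_cpu = _ask_field(
    "Do you want to run your training on CPU only? [yes/NO]: ",
    _convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)
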
def count_divisors(n):
    """Count the divisors of `n` via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Project Euler 12: find the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
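
# Quick sanity checks for the helpers above (76576500 is the widely cited
# Project Euler 12 answer; the second assert takes a few seconds to run):
assert count_divisors(28) == 6  # divisors of 28: 1, 2, 4, 7, 14, 28
assert solution() == 76576500
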
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Tuple = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Dict = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Any = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : List[str] , UpperCamelCase__ : Any=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : List[str]="<pad>" , UpperCamelCase__ : Dict=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : str = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : List[Any] = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = vocab_file
SCREAMING_SNAKE_CASE : Dict = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : Optional[int] = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Any = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
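
# Usage sketch for the fast T5 tokenizer defined above; its public name in
# transformers is T5TokenizerFast:
from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
input_ids = tokenizer("translate English to German: How old are you?").input_ids
print(tokenizer.decode(input_ids, skip_special_tokens=True))
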
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = '▁'
UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
UpperCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
UpperCAmelCase = {'vinai/bartpho-syllable': 1024}
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Dict = VOCAB_FILES_NAMES
UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[str] = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = monolingual_vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase = {}
lowerCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase = cnt
cnt += 1
with open(A_ , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
lowerCAmelCase = line.strip().split()[0]
lowerCAmelCase = len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase = len(self.fairseq_tokens_to_ids )
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> str:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __snake_case ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def __snake_case ( self , A_ , A_ = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self ) -> Dict:
return len(self.fairseq_ids_to_tokens )
def __snake_case ( self ) -> Any:
lowerCAmelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def __snake_case ( self , A_ ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __snake_case ( self , A_ ) -> Optional[int]:
return self.fairseq_ids_to_tokens[index]
def __snake_case ( self , A_ ) -> Union[str, Any]:
lowerCAmelCase = """""".join(A_ ).replace(A_ , """ """ ).strip()
return out_string
def __snake_case ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
        return out_vocab_file, out_monolingual_vocab_file
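
# Usage sketch for the BARTpho syllable tokenizer above (requires the
# sentencepiece package; the Vietnamese sample sentence is illustrative):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
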
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCamelCase : Dict = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def UpperCamelCase_ ( __a , __a , __a , __a , __a=False , __a=True ) -> Tuple:
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
a__, a__, a__, a__ : List[str] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
a__ : Optional[Any] = cached_file(__a , __a , force_download=not use_cached_models )
a__ : Tuple = config_class.from_json_file(__a )
a__ : List[Any] = True
a__ : Optional[int] = True
print(f'''Building TensorFlow model from configuration: {config}''' )
a__ : List[str] = model_class(__a )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
a__ : int = cached_file(
__a , __a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
a__ : List[str] = load_pytorch_checkpoint_in_tfa_model(__a , __a )
if compare_with_pt_model:
a__ : Optional[int] = tf_model(tf_model.dummy_inputs , training=__a ) # build the network
a__ : Union[str, Any] = torch.load(__a , map_location="cpu" )
a__ : Optional[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__a , config=__a , state_dict=__a )
with torch.no_grad():
a__ : Dict = pt_model(**pt_model.dummy_inputs )
a__ : str = pto[0].numpy()
a__ : Union[str, Any] = tfo[0].numpy()
a__ : Any = np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__a , save_format="h5" )
def UpperCamelCase_ ( __a , __a , __a=None , __a=None , __a=False , __a=False , __a=False , __a=False , ) -> Dict:
if args_model_type is None:
a__ : str = list(MODEL_CLASSES.keys() )
else:
a__ : int = [args_model_type]
for j, model_type in enumerate(__a , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(__a )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
a__, a__, a__, a__, a__ : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
a__ : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
a__ : Optional[Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__a , __a ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
a__ : Any = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(__a )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
a__ : Dict = cached_file(__a , __a , force_download=not use_cached_models )
else:
a__ : str = config_shortcut_name
if model_shortcut_name in aws_model_maps:
a__ : List[str] = cached_file(__a , __a , force_download=not use_cached_models )
else:
a__ : Optional[Any] = model_shortcut_name
if os.path.isfile(__a ):
a__ : int = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=__a , pytorch_checkpoint_path=__a , config_file=__a , tf_dump_path=os.path.join(__a , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=__a , )
if remove_cached_files:
os.remove(__a )
os.remove(__a )
if __name__ == "__main__":
UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
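    # Example invocation (illustrative file names and flags, mirroring the
    # arguments defined above; the script name is an assumption):
    #   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dumps \
    #       --model_type bert --compare_with_pt_model --use_cached_models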
| 37 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase ( snake_case : Optional[Any] , snake_case : Optional[int] ):
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def UpperCAmelCase ( snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple ):
_lowerCAmelCase:List[str] = tmp_path / '''cache'''
_lowerCAmelCase:str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase:List[Any] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_sql_dataset(snake_case , snake_case )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def UpperCAmelCase ( snake_case : List[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Tuple ):
_lowerCAmelCase:Union[str, Any] = tmp_path / '''cache'''
_lowerCAmelCase:Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCAmelCase:List[Any] = features.copy() if features else default_expected_features
_lowerCAmelCase:Dict = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=snake_case , cache_dir=snake_case ).read()
_check_sql_dataset(snake_case , snake_case )
def UpperCAmelCase ( snake_case : List[str] ):
with contextlib.closing(sqlitea.connect(snake_case ) ) as con:
_lowerCAmelCase:Tuple = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Dict ):
_lowerCAmelCase:Dict = tmp_path / '''cache'''
_lowerCAmelCase:Optional[int] = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
_lowerCAmelCase:int = iter_sql_file(snake_case )
_lowerCAmelCase:Any = iter_sql_file(snake_case )
for rowa, rowa in zip(snake_case , snake_case ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] ):
_lowerCAmelCase:Dict = tmp_path / '''cache'''
_lowerCAmelCase:Any = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:int = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
_lowerCAmelCase:List[str] = iter_sql_file(snake_case )
_lowerCAmelCase:Tuple = iter_sql_file(snake_case )
for rowa, rowa in zip(snake_case , snake_case ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Dict , snake_case : Tuple , snake_case : Optional[int] ):
_lowerCAmelCase:List[str] = tmp_path / '''cache'''
_lowerCAmelCase:List[str] = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
with pytest.raises(snake_case ):
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
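

def _sql_roundtrip_sketch(tmp_db_path):
    # Hedged example (not one of the original tests): round-trip a small
    # in-memory Dataset through SQLite via the public to_sql/from_sql API.
    # Assumes a `datasets` version that exposes Dataset.to_sql/Dataset.from_sql;
    # the table name and db path are illustrative.
    ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    ds.to_sql("dataset", "sqlite:///" + tmp_db_path)
    loaded = Dataset.from_sql("dataset", "sqlite:///" + tmp_db_path)
    assert loaded.column_names == ds.column_names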
| 227 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : int ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : Any ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int="facebook/mbart-large-en-ro" , snake_case__ : Any=False , snake_case__ : int=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
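

# Hedged usage sketch (illustrative paths): convert a local fairseq checkpoint;
# for a fine-tuned model the LM head is tied to the shared embeddings above.
#
#     model = convert_fairseq_mbart_checkpoint_from_disk(
#         "checkpoint.pt", hf_config_path="facebook/mbart-large-cc25", finetuned=True
#     )
#     model.save_pretrained("./mbart-converted")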
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 22 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 22 | 1 |
"""simple docstring"""
from __future__ import annotations
def a ( __UpperCAmelCase : int | str ) -> bool:
__magic_name__: List[str] = str(__UpperCAmelCase )
return n == n[::-1]
def a ( __UpperCAmelCase : int = 1_0_0_0_0_0_0 ) -> Tuple:
__magic_name__: int = 0
for i in range(1 , __UpperCAmelCase ):
if is_palindrome(__UpperCAmelCase ) and is_palindrome(bin(__UpperCAmelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
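    # Quick sanity check (follows from the definitions above): below 10, the
    # numbers palindromic in both base 10 and base 2 are 1, 3, 5, 7, 9 -> 25.
    assert solution(10) == 25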
| 96 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32,
                 attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
                 eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
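

# Hedged usage sketch: `attribute_map` above lets callers read `context_length`
# under the canonical `max_position_embeddings` name.
#
#     config = RwkvConfig(context_length=2048, num_hidden_layers=4)
#     assert config.max_position_embeddings == 2048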
| 389 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
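

# Hedged usage sketch (model id taken from the tests above; runtime values are
# illustrative, and a GPU is assumed for reasonable speed):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audios = pipe(num_inference_steps=50, audio_length_in_s=4.096).audios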
| 704 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
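
    # Hedged illustration of the byte-level BPE convention exercised above:
    # "\u0120" (Ġ) marks a word boundary, so a word-initial token decodes with
    # a leading space. For example (the exact output is an assumption):
    #
    #     tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    #     tok.tokenize(" lower")  # -> ["Ġlower"]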
| 349 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 675 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 675 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def UpperCAmelCase ( A : Callable , A : float , A : float , A : float , A : float ):
'''simple docstring'''
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(A ):
_UpperCAmelCase = y[k] + step_size * ode_func(A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
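    # Hedged usage sketch: integrate y' = y from x = 0 with y(0) = 1 and h = 0.1;
    # Heun's method is second order, so the final value approximates e closely.
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    assert abs(ys[-1] - 2.718281828) < 1e-2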
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224,
                 patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False,
                 use_relative_position_bias=False, use_shared_relative_position_bias=False,
                 layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True,
                 out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
                 auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1,
                 auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
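

# Hedged usage sketch: the ONNX export config above declares a single 4-D
# `pixel_values` input and a validation tolerance of 1e-4.
#
#     config = BeitConfig()
#     onnx_config = BeitOnnxConfig(config)
#     assert "pixel_values" in onnx_config.inputs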
| 24 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return a set of symbol pairs in a word; a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
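

# For example (follows directly from the definition above):
#
#     get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}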
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
                 unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 33 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None,
                 vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12,
                 n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new",
                 resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1,
                 layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02,
                 scale_attn_weights: bool = True, use_cache: bool = True,
                 scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head,
            n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = torch.split(_a , 1 , dim=0 )
lowerCamelCase = []
lowerCamelCase = []
for feature in features:
lowerCamelCase = self.decode_prefix(feature.to(_a ) ) # back to the clip feature
# Only support beam search for now
lowerCamelCase , lowerCamelCase = self.generate_beam(
input_embeds=_a , device=_a , eos_token_id=_a )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCamelCase = torch.stack(_a )
lowerCamelCase = torch.stack(_a )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _lowerCAmelCase ( self , _a=None , _a=None , _a=None , _a = 5 , _a = 67 , _a = 1.0 , _a = None , ):
"""simple docstring"""
lowerCamelCase = eos_token_id
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = torch.ones(_a , device=_a , dtype=torch.int )
lowerCamelCase = torch.zeros(_a , device=_a , dtype=torch.bool )
if input_embeds is not None:
lowerCamelCase = input_embeds
else:
lowerCamelCase = self.transformer.transformer.wte(_a )
for i in range(_a ):
lowerCamelCase = self.transformer(inputs_embeds=_a )
lowerCamelCase = outputs.logits
lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
lowerCamelCase , lowerCamelCase = logits.topk(_a , -1 )
lowerCamelCase = generated.expand(_a , *generated.shape[1:] )
lowerCamelCase , lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCamelCase = next_tokens
else:
lowerCamelCase = tokens.expand(_a , *tokens.shape[1:] )
lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCamelCase = -float(np.inf )
lowerCamelCase = 0
lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCamelCase = scores_sum / seq_lengths[:, None]
lowerCamelCase , lowerCamelCase = scores_sum_average.view(-1 ).topk(_a , -1 )
lowerCamelCase = next_tokens // scores_sum.shape[1]
lowerCamelCase = seq_lengths[next_tokens_source]
lowerCamelCase = next_tokens % scores_sum.shape[1]
lowerCamelCase = next_tokens.unsqueeze(1 )
lowerCamelCase = tokens[next_tokens_source]
lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
lowerCamelCase = generated[next_tokens_source]
lowerCamelCase = scores_sum_average * seq_lengths
lowerCamelCase = is_stopped[next_tokens_source]
lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
lowerCamelCase = is_stopped + next_tokens.eq(_a ).squeeze()
if is_stopped.all():
break
lowerCamelCase = scores / seq_lengths
lowerCamelCase = scores.argsort(descending=_a )
# tokens tensors are already padded to max_seq_length
lowerCamelCase = [tokens[i] for i in order]
lowerCamelCase = torch.stack(_a , dim=0 )
lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 543 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t",
                 loss="nll", input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0,
                 num_static_real_features=0, num_static_categorical_features=0, num_time_features=0,
                 cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32,
                 encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2,
                 is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1,
                 decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100,
                 init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
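

# Hedged usage sketch: `context_length` falls back to `prediction_length` when
# not given (see `context_length or prediction_length` in __init__ above).
#
#     config = InformerConfig(prediction_length=24)
#     assert config.context_length == 24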
| 681 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2,
                 padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8],
                 depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2],
                 attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True,
                 num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range, down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
def check_hidden_states_output(A_ : Union[str, Any] ,A_ : Optional[Any] ,A_ : Union[str, Any] ):
A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowercase ,_lowercase ) )
A = outputs.hidden_states
A = len(self.model_tester.depths ) + 1
self.assertEqual(len(_lowercase ) ,_lowercase )
A = (self.model_tester.image_size, self.model_tester.image_size)
A = image_size[0], image_size[1]
for _ in range(4 ):
A = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
A = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(_lowercase ,_lowercase ,_lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase ,_lowercase ,_lowercase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 91 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration of a pretrained model as a dict."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
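# Usage sketch (the checkpoint name is illustrative and the call needs network or cache
# access): get_feature_extractor_config("facebook/wav2vec2-base-960h") returns the
# contents of the repo's preprocessor_config.json as a dict, or {} if the repo does not
# ship one.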
class AutoFeatureExtractor:
    """
    Generic feature extractor class that is instantiated as one of the library's feature extractor
    classes via `AutoFeatureExtractor.from_pretrained`. It cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
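# Minimal usage sketch (checkpoint name is illustrative):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# and for a custom pair (both classes hypothetical, defined by the caller):
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)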
| 540 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # start from pure noise shaped like one sample of the unet
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # cancel out the scheduler output and return a tensor of ones of the same shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
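# Usage sketch (assuming an already-built unet/scheduler pair, e.g. from a tiny test
# checkpoint): pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler); calling
# pipeline() then returns a tensor of ones, which is what the pipeline tests assert.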
| 680 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
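        # e.g. with the defaults above: num_patches = (32 // 16) ** 2 = 4, so seq_length = 5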
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # an unsupported readout_type for the hybrid configuration should raise
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 680 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 367 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1_280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Read images and annotations, build NUMBER_IMAGES mosaic images, and save the
    resulting images and annotation files in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format boxes from the label/image dirs."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # convert (x_center, y_center, w, h) to (xmin, ymin, xmax, ymax)
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """
    Combine 4 images into one mosaic image and rescale their annotations accordingly.
    Boxes whose width or height falls below `filter_scale` are dropped.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
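# Coordinate sketch: boxes are normalized to [0, 1], so the top-left tile just shrinks
# them by (scale_x, scale_y); the other tiles apply the complementary shrink
# (1 - scale_x) / (1 - scale_y) and shift by scale_x and/or scale_y, so every box stays
# expressed relative to the full mosaic canvas.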
def random_chars(number_char: int) -> str:
    """
    Generate a random string of lowercase letters and digits, e.g.
    '7b7ad245cdff75241935e4dd860f3bad' for number_char=32.
    """
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 631 | 0 |
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursive approach: updates largest_square_area[0] whenever a bigger square is found."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
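# Without memoization the recursion above re-solves the same (row, col) subproblems many
# times, which is exponential in the worst case; the dp_array variant below caches each
# cell's answer, bringing the work down to O(rows * cols).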
def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
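# Quick check (hand-computed): for mat = [[1, 1], [1, 1]] all four variants return 2,
# i.e. the side length of the largest all-ones square (the name says "area", but the
# dp recurrence tracks side lengths). The space-optimized version keeps only two rows,
# so memory is O(cols) instead of O(rows * cols).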
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 219 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic keys."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
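# Illustrative example: an expert kernel key ("mlp", "wi", "kernel") with a 3D tensor
# becomes ("mlp", "wi", "weight") and the tensor is permuted from (experts, in, out) to
# (experts, out, in); a 2D "kernel" is simply transposed to torch's (out, in) layout.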
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("encoder_decoder", "decoder")] = v

    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
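# Shard-naming sketch: blocks are first written as pytorch_model-00001-of-???.bin while
# the total count is unknown, then renamed to e.g. pytorch_model-00001-of-00003.bin once
# every shard has been flushed; the index file maps each weight name to its shard.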
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
A__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 219 | 1 |
def solution(n: int = 10_00) -> int:
    """
    Project Euler 57: count, among the first `n` expansions of the continued fraction
    for sqrt(2), those whose numerator has more digits than the denominator.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
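# First few expansions: 3/2, 7/5, 17/12, 41/29, ...; the 8th, 1393/985, is the first
# whose numerator has more digits than its denominator. For the default n = 1000 this
# happens 153 times, which is the Project Euler answer.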
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 32 | 1 |
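# Usage sketch for the Flax auto classes defined in the block above. Hedged:
# assumes `transformers` is installed with its Flax/JAX extras and that the
# checkpoint (an illustrative choice) ships Flax weights.
def _flax_auto_model_example():
    from transformers import AutoTokenizer, FlaxAutoModel

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = FlaxAutoModel.from_pretrained("bert-base-uncased")  # dispatched via FLAX_MODEL_MAPPING
    inputs = tokenizer("an example sentence", return_tensors="np")
    return model(**inputs).last_hidden_state.shape  # (1, sequence_length, hidden_size)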
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features):
    """Return a small writer batch size for features that need compact row groups (images, audio, raw binary)."""
    batch_size = np.inf

    def set_batch_size(feature) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Normalize to a {split_name: paths} mapping before handing off to the builder
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Streaming mode returns an IterableDataset without materializing anything
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Small row groups for image/audio/binary features keep random access cheap
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to `file_obj` as Parquet and return the number of bytes written.

        The caller is responsible for opening and closing the file handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
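# Round-trip sketch of the public API these classes back (`Dataset.to_parquet`
# and `load_dataset("parquet", ...)`); the file name is illustrative.
def _parquet_round_trip_example():
    from datasets import Dataset, load_dataset

    ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
    num_bytes = ds.to_parquet("example.parquet")  # uses ParquetDatasetWriter under the hood
    reloaded = load_dataset("parquet", data_files="example.parquet", split="train")
    return num_bytes, reloaded[0]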
| 712 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """List the most recent runs of the scheduled (daily) CI workflow."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed daily CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download artifacts and return {artifact_name: {filename: decoded file content}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
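# Example driver for the helpers above. Hedged sketch: the environment-variable
# name and artifact name are illustrative assumptions, not fixed CI values.
if __name__ == "__main__":
    token = os.environ.get("ACCESS_REPO_INFO_TOKEN")
    artifact_names = ["prev_ci_results"]  # hypothetical artifact name
    os.makedirs("prev_ci", exist_ok=True)
    reports = get_last_daily_ci_reports(artifact_names, output_dir="prev_ci", token=token)
    for name, files in reports.items():
        print(name, sorted(files))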
| 545 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (3_2, 3_2)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(3_2, 3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=7, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=3_2, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=1_0_0, )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[3_2, 3_2, 6_4], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0,
            hidden_act='gelu', projection_dim=5_1_2, )
        return CLIPTextModel(config)
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ : Any = self.dummy_cond_unet_upscale
UpperCamelCase_ : str = DDPMScheduler()
UpperCamelCase_ : List[Any] = DDIMScheduler(prediction_type='v_prediction' )
UpperCamelCase_ : List[str] = self.dummy_vae
UpperCamelCase_ : Dict = self.dummy_text_encoder
UpperCamelCase_ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ : str = Image.fromarray(np.uint8(snake_case ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
UpperCamelCase_ : str = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=3_5_0 , )
UpperCamelCase_ : List[str] = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
UpperCamelCase_ : List[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase_ : Tuple = torch.Generator(device=snake_case ).manual_seed(0 )
UpperCamelCase_ : Optional[int] = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ : Any = output.images
UpperCamelCase_ : str = torch.Generator(device=snake_case ).manual_seed(0 )
UpperCamelCase_ : List[Any] = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=snake_case , )[0]
UpperCamelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCamelCase_ : List[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCamelCase_ : Dict = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ : Tuple = self.dummy_cond_unet_upscale
UpperCamelCase_ : List[str] = DDPMScheduler()
UpperCamelCase_ : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' )
UpperCamelCase_ : Union[str, Any] = self.dummy_vae
UpperCamelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCamelCase_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ : Optional[int] = Image.fromarray(np.uint8(snake_case ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
UpperCamelCase_ : int = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=3_5_0 , )
UpperCamelCase_ : List[str] = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
UpperCamelCase_ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase_ : List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ : Optional[Any] = output.images
assert image.shape[0] == 2
UpperCamelCase_ : Tuple = torch.Generator(device=snake_case ).manual_seed(0 )
UpperCamelCase_ : int = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
UpperCamelCase_ : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.dummy_cond_unet_upscale
UpperCamelCase_ : List[Any] = DDPMScheduler()
UpperCamelCase_ : Optional[Any] = DDIMScheduler(prediction_type='v_prediction' )
UpperCamelCase_ : List[str] = self.dummy_vae
UpperCamelCase_ : List[str] = self.dummy_text_encoder
UpperCamelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase_ : int = Image.fromarray(np.uint8(snake_case ) ).convert('RGB' ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
UpperCamelCase_ : str = unet.half()
UpperCamelCase_ : List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase_ : Tuple = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=3_5_0 , )
UpperCamelCase_ : Union[str, Any] = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
UpperCamelCase_ : int = 'A painting of a squirrel eating a burger'
UpperCamelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCamelCase_ : List[Any] = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , ).images
UpperCamelCase_ : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCamelCase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
UpperCamelCase_ : List[str] = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCamelCase_ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
UpperCamelCase_ : Dict = 'a cat sitting on a park bench'
UpperCamelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase_ : List[str] = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , output_type='np' , )
UpperCamelCase_ : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCamelCase_ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
UpperCamelCase_ : int = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCamelCase_ : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(
snake_case , torch_dtype=torch.float16 , )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
UpperCamelCase_ : Optional[Any] = 'a cat sitting on a park bench'
UpperCamelCase_ : List[str] = torch.manual_seed(0 )
UpperCamelCase_ : List[str] = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , output_type='np' , )
UpperCamelCase_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase_ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCamelCase_ : Any = 'stabilityai/stable-diffusion-x4-upscaler'
UpperCamelCase_ : Any = StableDiffusionUpscalePipeline.from_pretrained(
snake_case , torch_dtype=torch.float16 , )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase_ : str = 'a cat sitting on a park bench'
UpperCamelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCamelCase_ : List[str] = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , num_inference_steps=5 , output_type='np' , )
UpperCamelCase_ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
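# Sketch of the pipeline exercised by the tests above; assumes `diffusers`,
# `torch`, and a CUDA GPU. The checkpoint matches the slow tests; the input
# image here is only a placeholder.
def _run_upscaler_example():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.to("cuda")
    low_res = Image.new("RGB", (128, 128))  # placeholder 128x128 input
    upscaled = pipe(prompt="a white cat", image=low_res).images[0]  # 4x -> 512x512
    upscaled.save("upscaled_cat.png")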
| 417 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
a_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
a_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus-level BLEU and unpack the score tuple returned by `compute_bleu`."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
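# Worked example mirroring the docstring above; assumes the (deprecated)
# `datasets.load_metric` entry point is still available in the installed version.
if __name__ == "__main__":
    predictions = [["hello", "there", "general", "kenobi"], ["foo", "bar", "foobar"]]
    references = [
        [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],
        [["foo", "bar", "foobar"]],
    ]
    bleu = datasets.load_metric("bleu")
    results = bleu.compute(predictions=predictions, references=references)
    print(results["bleu"])  # 1.0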
| 417 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] =self.get_dummy_components()
lowercase : Dict =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[str] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : int =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : int =self.get_dummy_components()
lowercase : List[Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Tuple =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : str ='''french fries'''
lowercase : str =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : Dict =output.images
lowercase : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Optional[Any] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] =self.get_dummy_components()
lowercase : Optional[Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : Tuple =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ , view_batch_size=2 )
lowercase : Dict =output.images
lowercase : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : str =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase : Union[str, Any] =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Optional[int] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Tuple =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Tuple =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase__ )
lowercase : str =StableDiffusionPanoramaPipeline(**UpperCAmelCase__ )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : str =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int ='''stabilityai/stable-diffusion-2-base'''
lowercase : List[Any] =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : List[Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
lowercase : Union[str, Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Tuple =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : Any =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase__ )
lowercase : Tuple =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Dict =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase : Any =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =0
def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
lowercase : Optional[int] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : Optional[Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : Any =latents[0, -3:, -3:, -1]
lowercase : int =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase : int =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase : List[str] =latents[0, -3:, -3:, -1]
lowercase : Any =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase : Optional[int] =False
lowercase : Any ='''stabilityai/stable-diffusion-2-base'''
lowercase : str =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : List[Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : List[Any] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : List[Any] =self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Optional[int] ='''stabilityai/stable-diffusion-2-base'''
lowercase : int =DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
lowercase : Union[str, Any] =StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : str =self.get_inputs()
lowercase : Any =pipe(**UpperCAmelCase__ )
lowercase : Optional[Any] =torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
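# Sketch of MultiDiffusion-style panorama generation as exercised above;
# assumes `diffusers`, `torch`, and a CUDA device (checkpoint from the slow tests).
def _run_panorama_example():
    model_ckpt = 'stabilityai/stable-diffusion-2-base'
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        model_ckpt, scheduler=scheduler, torch_dtype=torch.float16)
    pipe.to('cuda')
    image = pipe(prompt='a photo of the dolomites').images[0]  # default canvas is 512x2048
    image.save('dolomites_panorama.png')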
| 719 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed intent: silence TensorFlow C++ logging (the only use of the `os` import)
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
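# The guarded-import probe above extends naturally to other optional packages;
# a small sketch (the package list is an illustrative assumption):
for _pkg_name in ("datasets", "accelerate"):
    try:
        _pkg = __import__(_pkg_name)
        print(f"{_pkg_name} version:", _pkg.__version__)
    except ImportError:
        print(f"{_pkg_name} version:", None)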
| 88 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=5_02_65, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
        position_embedding_type="absolute", block_per_row=4, approx_mode="full",
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
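# Minimal round-trip sketch for the config above (no model weights involved;
# the directory name is illustrative):
if __name__ == "__main__":
    config = MraConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    config.save_pretrained("mra-tiny-config")  # writes config.json
    assert MraConfig.from_pretrained("mra-tiny-config").hidden_size == 256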
| 323 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : Any = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    # The (256/224) shortest-edge resize below matches LeViT preprocessing,
    # hence the readable name chosen for the obfuscated original.
    model_input_names = ["pixel_values"]
def __init__( self : Optional[int] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_55 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCamelCase__ : Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
UpperCAmelCase = size if size is not None else {"shortest_edge": 2_24}
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase = get_size_dict(UpperCamelCase__ , param_name="crop_size" )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase = int((2_56 / 2_24) * size["shortest_edge"] )
UpperCAmelCase = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
UpperCamelCase__ , size=(size_dict["height"], size_dict["width"]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(UpperCamelCase__ , size=(size["height"], size["width"]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ) -> BatchFeature:
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(UpperCamelCase__ , param_name="crop_size" )
UpperCAmelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
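# Hedged usage sketch: runs an image processor with this preprocessing recipe
# through the Auto API; the LeViT checkpoint name is an illustrative assumption.
def _image_processor_example():
    from transformers import AutoImageProcessor

    image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # dummy HWC image
    processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # expected (1, 3, 224, 224)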
| 323 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
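# With the lazy structure above, `from transformers import ConvNextModel` only
# materializes `modeling_convnext` on first access. Shown as a comment because
# an eager import here would defeat the laziness (checkpoint name illustrative):
#
#     from transformers import ConvNextImageProcessor, ConvNextModel
#     processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#     model = ConvNextModel.from_pretrained("facebook/convnext-tiny-224")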
| 102 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
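# Small check of the deprecation behavior above (sketch):
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        PoolFormerFeatureExtractor()  # triggers the FutureWarning
    assert any("deprecated" in str(w.message) for w in caught)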
| 102 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = 'efficientformer'

    def __init__(
        self,
        depths=[3, 2, 6, 4], hidden_sizes=[4_8, 9_6, 2_2_4, 4_4_8], downsamples=[True, True, True, True],
        dim=4_4_8, key_dim=3_2, attention_ratio=4, resolution=7, num_hidden_layers=5, num_attention_heads=8,
        mlp_expansion_ratio=4, hidden_dropout_prob=0.0, patch_size=1_6, num_channels=3, pool_size=3,
        downsample_patch_size=3, downsample_stride=2, downsample_pad=1, drop_path_rate=0.0,
        num_meta3d_blocks=1, distillation=True, use_layer_scale=True, layer_scale_init_value=1e-5,
        hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, image_size=2_2_4,
        batch_norm_eps=1e-05, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
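# Quick sanity-check sketch for the config above (no weights involved):
if __name__ == "__main__":
    config = EfficientFormerConfig(image_size=192)
    print(config.depths, config.hidden_sizes, config.image_size)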
| 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K); used in the thermal-voltage term below
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
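    # Worked example: a symmetric silicon junction (illustrative concentrations
    # in cm^-3); at T = 300 K the result is roughly 0.81 V.
    v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
    print(f"built-in voltage: {v_bi:.3f} V")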
| 338 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
snake_case__ : List[str] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
snake_case__ : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files)
if isinstance(snake_case_ , (str, list, tuple)):
snake_case__ : List[Any] = data_files
if isinstance(snake_case_ , snake_case_):
snake_case__ : Dict = [files]
snake_case__ : str = [dl_manager.iter_files(snake_case_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
snake_case__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(snake_case_ , snake_case_):
snake_case__ : int = [files]
snake_case__ : Optional[int] = [dl_manager.iter_files(snake_case_) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={"files": files}))
return splits
    def _cast_table(self, pa_table):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
snake_case__ : Optional[int] = self.config.features.arrow_schema.field(snake_case_).type
snake_case__ : Union[str, Any] = pa_table.append_column(snake_case_ , pa.array([None] * len(snake_case_) , type=snake_case_))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case__ : Tuple = table_cast(snake_case_ , self.config.features.arrow_schema)
return pa_table
    def _generate_tables(self, files):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case_)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
snake_case__ : Union[str, Any] = json.load(snake_case_)
# We keep only the field we are interested in
snake_case__ : List[str] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case_ , (list, tuple)):
snake_case__ : Any = set().union(*[row.keys() for row in dataset])
snake_case__ : str = {col: [row.get(snake_case_) for row in dataset] for col in keys}
else:
snake_case__ : List[str] = dataset
snake_case__ : Tuple = pa.Table.from_pydict(snake_case_)
yield file_idx, self._cast_table(snake_case_)
# If the file has one json object per line
else:
with open(snake_case_ , "rb") as f:
snake_case__ : Union[str, Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case__ : Optional[Any] = max(self.config.chunksize // 32 , 16 << 10)
snake_case__ : Tuple = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
snake_case__ : int = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case_)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case__ : Optional[int] = batch.decode(self.config.encoding , errors=snake_case_).encode("utf-8")
try:
while True:
try:
snake_case__ : List[Any] = paj.read_json(
io.BytesIO(snake_case_) , read_options=paj.ReadOptions(block_size=snake_case_))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case_ , pa.ArrowInvalid)
and "straddling" not in str(snake_case_)
or block_size > len(snake_case_)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(snake_case_)} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
snake_case__ : Optional[Any] = json.load(snake_case_)
except json.JSONDecodeError:
logger.error(f"""Failed to read file \'{file}\' with error {type(snake_case_)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case_ , snake_case_): # list is the only sequence type supported in JSON
try:
snake_case__ : Union[str, Any] = set().union(*[row.keys() for row in dataset])
snake_case__ : Union[str, Any] = {col: [row.get(snake_case_) for row in dataset] for col in keys}
snake_case__ : Union[str, Any] = pa.Table.from_pydict(snake_case_)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file \'{file}\' with error {type(snake_case_)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(snake_case_)
break
else:
logger.error(f"""Failed to read file \'{file}\' with error {type(snake_case_)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case_)
batch_idx += 1
| 721 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def A__ ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
if not is_accelerate_available():
return method
snake_case__ : Tuple = version.parse(accelerate.__version__ ).base_version
if version.parse(_UpperCAmelCase ) < version.parse("0.17.0" ):
return method
def wrapper(self : List[str] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Dict ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *_UpperCAmelCase , **_UpperCAmelCase )
return wrapper
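

# Usage sketch (added for illustration; `TinyAutoencoder` is a hypothetical module, not part
# of the original file). Decorating a method ensures any accelerate offload hook attached to
# the module runs before its weights are touched:
if __name__ == "__main__":
    import torch

    class TinyAutoencoder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = torch.nn.Linear(4, 4)

        @apply_forward_hook
        def encode(self, x):
            return self.encoder(x)

    print(TinyAutoencoder().encode(torch.zeros(1, 4)).shape)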
| 150 | 0 |
"""VAN model configuration."""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
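

# Illustrative sketch (added, not part of the original file): instantiating the config with
# its defaults and reading a couple of fields back.
if __name__ == "__main__":
    config = VanConfig()
    print(config.model_type, config.hidden_sizes, config.depths)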
| 78 |
import pickle
import shutil
import tempfile
import unittest
from typing import Any, List  # used by the module-level expected-encoding literal kept verbatim below

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #            ^ unk: 2 + 1 = 3       unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # The expected encoding is kept as the module-level one-line literal below
        # (it retains its dumped name `_lowerCamelCase` to avoid retyping the data).
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )


# fmt: off
# Giant expected-encoding literal for the integration test above, unchanged from the source:
_lowerCamelCase : List[Any] ={'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
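

# Background sketch (added for illustration, not part of the original tests): XLM-R reserves
# "<s>", "<pad>", "</s>" and "<unk>" ahead of the raw SentencePiece vocabulary, so every model
# id is the SentencePiece id shifted by `tokenizer.fairseq_offset` (1) -- which is why the
# expected-id lists above add the offset. Requires the "xlm-roberta-base" checkpoint.
if __name__ == "__main__":
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    token = tokenizer.tokenize("Hello")[0]
    piece_id = tokenizer.sp_model.PieceToId(token)
    assert tokenizer.convert_tokens_to_ids(token) == piece_id + tokenizer.fairseq_offset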
| 464 | 0 |
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
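

# Worked example (added for illustration; the shard names are hypothetical). Shards are
# expected to be named "<prefix>-<shard_index>-<num_samples>.tfrecord", so the total sample
# count can be recovered from the file names alone, without opening any records:
assert count_samples(["gs://bucket/wiki-0-1024.tfrecord", "wiki-1-512.tfrecord"]) == 1536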
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)

    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
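

# Illustrative sketch (added, not part of the original file): inspecting the dynamic axes the
# ONNX export helper reports for the default task. Assumes `BertOnnxConfig(config)` is a
# valid construction, mirroring how other model ONNX configs are typically instantiated.
if __name__ == "__main__":
    onnx_config = BertOnnxConfig(BertConfig())
    print(dict(onnx_config.inputs))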
| 279 | 0 |
"""Depth-first topological-style sort of a small, hard-coded DAG."""

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Visit vertices depth-first from `start`, appending each vertex after its children."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
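    # Added check (illustration): the traversal appends children before parents, so the result
    # is a reverse topological order; reversing it puts every vertex before its out-neighbours.
    assert sort == ["c", "d", "e", "b", "a"]
    print(list(reversed(sort)))  # ['a', 'b', 'e', 'd', 'c']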
| 69 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
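    # Added note: only words strictly longer than four characters are reversed, so a
    # four-letter word passes through unchanged:
    assert reverse_long_words("abcd dcba stops") == "abcd dcba spots"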
| 69 | 1 |
"""REALM model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
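

# Illustrative sketch (added, not part of the original file): one config object carries the
# shared encoder fields plus the reader- and retrieval-specific ones.
if __name__ == "__main__":
    config = RealmConfig()
    print(config.hidden_size, config.retriever_proj_size, config.reader_beam_size, config.num_block_records)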
| 88 |
"""Recursive backtracking maze solver (rat-in-a-maze)."""

from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a square maze of 0s (open cells) and 1s (blocked cells), moving from the
    top-left corner to the bottom-right corner. Prints the path if one exists."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search with backtracking; marks cells on the current path in `solutions`."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check the four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
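    # Added demonstration (hypothetical 3x3 maze; 0 = open cell, 1 = blocked cell). A path
    # exists along the top row and down the right-hand column:
    solve_maze([[0, 0, 0], [1, 1, 0], [1, 1, 0]])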
| 88 | 1 |
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F

from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)

        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
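

# Usage sketch (added for illustration, not part of the original tests): generating audio with
# the same public pipeline API the slow tests exercise. Requires the "cvssp/audioldm" weights
# and produces 5.12 s of 16 kHz audio (81920 samples).
if __name__ == "__main__":
    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    audio = pipe(
        "A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.12
    ).audios[0]
    print(audio.shape)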
| 87 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case :
'''simple docstring'''
def __init__( self : int , __lowercase : Dict , __lowercase : int=13 , __lowercase : str=7 , __lowercase : List[str]=True , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=True , __lowercase : Optional[int]=True , __lowercase : Dict=99 , __lowercase : int=64 , __lowercase : Dict=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[Any]=37 , __lowercase : Dict="gelu" , __lowercase : int=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Tuple=512 , __lowercase : List[str]=16 , __lowercase : Dict=2 , __lowercase : int=0.0_2 , __lowercase : Dict=3 , __lowercase : List[str]=4 , __lowercase : Optional[int]=None , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : Optional[Any] = use_token_type_ids
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = embedding_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : Optional[int] = scope
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining models: they also expect a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound) | 522 | 0 |
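The ratio-based bound check in the test above generalizes to any comparison of values spanning many orders of magnitude. A minimal, self-contained sketch (tensors and tolerance are illustrative, not taken from the test):

```python
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, -2.5e7, 3.0])
actual = expected * 1.0005  # simulate a small relative deviation

# ratio ~ 1 regardless of magnitude; an absolute atol large enough for the
# 1e8 entry would mask real errors in the small entries
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)
```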
def solution(min_total: int = 10**12) -> int:
    """
    Walks a Pell-like recurrence over (numerator, denominator) pairs until the
    numerator exceeds 2 * min_total - 1, then returns (denominator + 1) // 2.
    """
    prev_numerator = 1
    numerator = 0
    prev_denominator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
| 703 |
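Tracing the recurrence for a few steps makes the returned quantity concrete; the triples below were computed by hand from the loop body above:

```python
prev_numerator, numerator = 1, 0
prev_denominator, denominator = 1, 1
for _ in range(3):
    prev_numerator += 2 * numerator
    numerator += 2 * prev_numerator
    prev_denominator += 2 * denominator
    denominator += 2 * prev_denominator
    print(numerator, denominator, (denominator + 1) // 2)
# 2 7 4
# 12 41 21
# 70 239 120
```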
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Crop the kernel-sized window centred on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build the spatial weights: gaussian of the distance to the window centre.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 646 | 0 |
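A cv2-free smoke test of the filter above; the synthetic image and parameter values are invented for illustration, and only interior pixels are written (the kernel-sized border of the output stays zero):

```python
import numpy as np

rng = np.random.default_rng(0)
clean = np.tile(np.linspace(0.0, 1.0, 16), (16, 1))  # horizontal gradient
noisy = np.clip(clean + rng.normal(0.0, 0.05, clean.shape), 0.0, 1.0)

smoothed = bilateral_filter(noisy, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
inner = (slice(2, -2), slice(2, -2))  # ignore the unwritten border
print(np.abs(smoothed[inner] - clean[inner]).mean())
print(np.abs(noisy[inner] - clean[inner]).mean())  # expected to be larger
```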
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase = None , _lowercase = None , _lowercase=None , _lowercase=None ) -> Optional[Any]:
'''simple docstring'''
if not conversation_id:
snake_case_ : Tuple = uuid.uuida()
if past_user_inputs is None:
snake_case_ : List[str] = []
if generated_responses is None:
snake_case_ : List[str] = []
snake_case_ : uuid.UUID = conversation_id
snake_case_ : List[str] = past_user_inputs
snake_case_ : List[str] = generated_responses
snake_case_ : Optional[str] = text
def __eq__( self , _lowercase ) -> Any:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False ) -> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
snake_case_ : Union[str, Any] = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
snake_case_ : Union[str, Any] = text
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case_ : int = None
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
self.generated_responses.append(_lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
snake_case_ : Tuple = """user""" if is_user else """bot"""
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
if self.tokenizer.pad_token_id is None:
snake_case_ : str = self.tokenizer.eos_token
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> str:
'''simple docstring'''
snake_case_ : int = {}
snake_case_ : Tuple = {}
snake_case_ : Optional[Any] = {}
if min_length_for_response is not None:
snake_case_ : str = min_length_for_response
if minimum_tokens is not None:
snake_case_ : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
snake_case_ : Tuple = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case_ : Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _lowercase , _lowercase=0 , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : str = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase=3_2 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
snake_case_ : Union[str, Any] = self.tokenizer._build_conversation_input_ids(_lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case_ : List[str] = self._legacy_parse_and_tokenize(_lowercase )
if self.framework == "pt":
snake_case_ : int = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case_ : Optional[int] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase__ ( self , _lowercase , _lowercase=1_0 , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length )
snake_case_ : List[str] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
snake_case_ : List[str] = max_length - minimum_tokens
snake_case_ : Dict = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
snake_case_ : List[Any] = model_inputs["""attention_mask"""][:, -trim:]
snake_case_ : List[Any] = model_inputs.pop("""conversation""" )
snake_case_ : Optional[Any] = max_length
snake_case_ : Optional[Any] = self.model.generate(**_lowercase , **_lowercase )
if self.model.config.is_encoder_decoder:
snake_case_ : int = 1
else:
snake_case_ : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase__ ( self , _lowercase , _lowercase=True ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = model_outputs["""output_ids"""]
snake_case_ : List[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
snake_case_ : List[Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_lowercase )
return conversation
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.tokenizer.eos_token_id
snake_case_ : List[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
if len(_lowercase ) > self.tokenizer.model_max_length:
snake_case_ : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 58 |
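A quick sketch of the `Conversation` bookkeeping defined above (the strings are made up):

```python
conversation = Conversation("Is it going to rain today?")
conversation.mark_processed()  # moves the pending input into past_user_inputs
conversation.append_response("I am not sure, let me check.")
conversation.add_user_input("Please do.")
print(conversation)
# Conversation id: <some uuid>
# user >> Is it going to rain today?
# bot >> I am not sure, let me check.
# user >> Please do.
```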
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
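The early-exit decision in DeeBERT-style models hinges on the entropy of each highway classifier's logits. This standalone sketch computes softmax entropy, which is mathematically equivalent to what the imported `entropy` helper does; the threshold value is illustrative:

```python
import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

confident = torch.tensor([[8.0, -4.0]])  # peaked -> low entropy -> exit early
uncertain = torch.tensor([[0.1, 0.0]])   # flat -> high entropy -> keep going
threshold = 0.3
for logits in (confident, uncertain):
    print(bool(logits_entropy(logits).item() < threshold))
# True
# False
```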
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 548 |
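A hand-verified round trip through the two helpers above:

```python
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"
assert encrypt("HI 5") == ".... .. / ....."  # space becomes "/"
```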
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") | 548 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTest(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_image_processor_from_pretrained_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 59 |
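The offline-cache pattern from the first test above, reduced to its core; treat it as a sketch of the mocking technique rather than of transformers internals:

```python
import unittest.mock as mock
from requests.exceptions import HTTPError

response_mock = mock.Mock(status_code=500, headers={})
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock):
    # anything that goes through a requests Session now sees a 500 and must
    # fall back to whatever it has cached locally
    ...
```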
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 59 | 1 |
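What the two sequence-pair methods above produce, with made-up token ids (101/102 stand in for [CLS]/[SEP]):

```python
cls_id, sep_id = 101, 102
token_ids_0 = [7, 8]
token_ids_1 = [9]

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)
print(input_ids)       # [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]
```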
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if it is already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 717 |
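Driving the command object directly, equivalent to `transformers-cli download bert-base-uncased --cache-dir /tmp/hf-cache` (model name and cache path are examples):

```python
from argparse import Namespace

args = Namespace(model="bert-base-uncased", cache_dir="/tmp/hf-cache", force=False, trust_remote_code=False)
DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code).run()
```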
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 620 | 0 |
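The step `self.quantize` performs between `quant_conv` and `post_quant_conv` is nearest-neighbour lookup in a learned codebook. A tiny standalone sketch of that core operation (codebook and latents are invented):

```python
import torch

codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0], [-1.0, 1.0]])  # (n_codes, d)
latents = torch.tensor([[0.9, 1.2], [-0.1, 0.1]])               # (n, d)

dists = torch.cdist(latents, codebook)  # pairwise L2 distances
indices = dists.argmin(dim=1)           # nearest code per latent vector
quantized = codebook[indices]
print(indices.tolist())    # [1, 0]
print(quantized.tolist())  # [[1.0, 1.0], [0.0, 0.0]]
```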
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Given any two of conductivity, electron concentration and mobility
    (pass the unknown one as 0), solve sigma = n * e * mu for the third and
    return its name together with the computed value.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("Mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 284 |
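A worked call (numbers invented, SI units assumed): with conductivity unknown, the function returns sigma = n * e * mu.

```python
name, value = carrier_concentration(conductivity=0, electron_conc=1e20, mobility=0.05)
print(name, value)  # conductivity 0.80105  (= 0.05 * 1e20 * 1.6021e-19)
```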
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 676 | 0 |
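Wiring the three configs together; all values printed are the defaults shown above, so this is purely a composition sketch:

```python
text_config = AltCLIPTextConfig()
vision_config = AltCLIPVisionConfig()
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)          # 768
print(config.text_config.vocab_size)  # 250002
```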
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Any = logging.get_logger(__name__)
A__: List[str] = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "xlm"
__UpperCamelCase : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :int=3_0_1_4_5 , SCREAMING_SNAKE_CASE :List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE :str=1_2 , SCREAMING_SNAKE_CASE :Tuple=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Optional[int]=False , SCREAMING_SNAKE_CASE :str=1 , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :int=5_1_2 , SCREAMING_SNAKE_CASE :Any=2_0_4_8**-0.5 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :Dict=0 , SCREAMING_SNAKE_CASE :Tuple=1 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[int]=3 , SCREAMING_SNAKE_CASE :Dict=5 , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :List[Any]="first" , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Tuple=0.1 , SCREAMING_SNAKE_CASE :List[str]=5 , SCREAMING_SNAKE_CASE :List[str]=5 , SCREAMING_SNAKE_CASE :Tuple=0 , SCREAMING_SNAKE_CASE :Tuple=0 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :Optional[int]=0 , **SCREAMING_SNAKE_CASE :Tuple , ) -> List[str]:
'''simple docstring'''
_a : Tuple =vocab_size
_a : int =emb_dim
_a : Dict =n_layers
_a : List[Any] =n_heads
_a : str =dropout
_a : Tuple =attention_dropout
_a : Dict =gelu_activation
_a : Any =sinusoidal_embeddings
_a : str =causal
_a : str =asm
_a : Tuple =n_langs
_a : str =use_lang_emb
_a : Dict =layer_norm_eps
_a : Union[str, Any] =bos_index
_a : int =eos_index
_a : Optional[int] =pad_index
_a : List[Any] =unk_index
_a : int =mask_index
_a : Any =is_encoder
_a : Tuple =max_position_embeddings
_a : Optional[Any] =embed_init_std
_a : List[Any] =init_std
_a : str =summary_type
_a : Optional[int] =summary_use_proj
_a : List[str] =summary_activation
_a : Tuple =summary_proj_to_labels
_a : List[Any] =summary_first_dropout
_a : Union[str, Any] =start_n_top
_a : Optional[int] =end_n_top
_a : List[Any] =mask_token_id
_a : List[Any] =lang_id
if "n_words" in kwargs:
_a : Dict =kwargs["""n_words"""]
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A__ ( UpperCAmelCase__ ):
@property
def __UpperCAmelCase ( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_a : Optional[Any] ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_a : Tuple ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
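# A minimal sketch of the attribute_map aliasing used in the config above (illustrative
# names, not the library's implementation): reads of a canonical attribute are redirected
# to the model-specific one, which is how hidden_size can resolve to emb_dim.
class AliasedConfig:
    attribute_map = {"hidden_size": "emb_dim"}

    def __init__(self, emb_dim):
        self.emb_dim = emb_dim

    def __getattr__(self, name):
        # invoked only when normal attribute lookup fails
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

print(AliasedConfig(emb_dim=2048).hidden_size)  # 2048, served by emb_dim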
| 506 | 0 |
'''simple docstring'''
def A (discount_rate: float, cash_flows: list[float]):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
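# Standalone cross-check of the net-present-value formula above (illustrative
# numbers: pay 100 now, receive 110 one period later, at a 5% discount rate):
rate, flows = 0.05, [-100.0, 110.0]
npv = sum(cf / (1 + rate) ** i for i, cf in enumerate(flows))
print(round(npv, 2))  # 4.76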
| 5 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = IFInpaintingSuperResolutionPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def snake_case ( self: Optional[Any] ):
return self._get_superresolution_dummy_components()
def snake_case ( self: List[str] ,a: str ,a: Tuple=0 ):
if str(a ).startswith('mps' ):
__UpperCAmelCase = torch.manual_seed(a )
else:
__UpperCAmelCase = torch.Generator(device=a ).manual_seed(a )
__UpperCAmelCase = floats_tensor((1, 3, 16, 16) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a ) ).to(a )
__UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def snake_case ( self: Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case ( self: int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' ,reason='float16 requires CUDA' )
def snake_case ( self: Any ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case ( self: Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case ( self: List[Any] ):
self._test_save_load_local()
def snake_case ( self: Tuple ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 396 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : Optional[int] = UniSpeechSatForSequenceClassification.from_pretrained(snake_case__ , config=snake_case__ )
A_ : Any = downstream_dict["""projector.weight"""]
A_ : Optional[Any] = downstream_dict["""projector.bias"""]
A_ : Union[str, Any] = downstream_dict["""model.post_net.linear.weight"""]
A_ : List[Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : List[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case__ , config=snake_case__ )
A_ : Dict = downstream_dict["""model.linear.weight"""]
A_ : int = downstream_dict["""model.linear.bias"""]
return model
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : List[Any] = UniSpeechSatForXVector.from_pretrained(snake_case__ , config=snake_case__ )
A_ : Dict = downstream_dict["""connector.weight"""]
A_ : Optional[Any] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A_ : Any = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
A_ : Optional[Any] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
A_ : List[str] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
A_ : Tuple = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
A_ : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
A_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
A_ : Any = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
A_ : List[Any] = torch.load(snake_case__ , map_location="""cpu""" )
A_ : int = checkpoint["""Downstream"""]
A_ : Optional[int] = UniSpeechSatConfig.from_pretrained(snake_case__ )
A_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
snake_case__ , return_attention_mask=snake_case__ , do_normalize=snake_case__ )
A_ : List[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
A_ : List[Any] = convert_classification(snake_case__ , snake_case__ , snake_case__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
A_ : str = convert_diarization(snake_case__ , snake_case__ , snake_case__ )
elif arch.endswith("""ForXVector""" ):
A_ : Dict = convert_xvector(snake_case__ , snake_case__ , snake_case__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
A_ : List[str] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
_lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
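# Every converter above follows the same pattern: read tensors out of the s3prl
# checkpoint's downstream state dict and assign them onto the matching HF module
# attributes. The pattern in miniature (toy shapes; key name borrowed from the snippet):
import torch
from torch import nn

downstream = {"projector.weight": torch.randn(4, 8), "projector.bias": torch.randn(4)}
projector = nn.Linear(8, 4)
projector.weight.data = downstream["projector.weight"]
projector.bias.data = downstream["projector.bias"]
assert torch.equal(projector.weight.data, downstream["projector.weight"])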
| 480 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_lowerCAmelCase = "http://www.mocksite.com/file1.txt"
_lowerCAmelCase = "\"text\": [\"foo\", \"foo\"]"
_lowerCAmelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_A : Optional[int] = 200
_A : Dict = {"""Content-Length""": """100"""}
_A : List[Any] = {}
def lowerCamelCase(self , **lowerCAmelCase_ ):
return [bytes(lowerCAmelCase_ , """utf-8""" )]
def __UpperCamelCase ( *snake_case__ , **snake_case__ ):
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
import requests
monkeypatch.setattr(snake_case__ , """request""" , snake_case__ )
A_ : Optional[int] = URL
if issubclass(snake_case__ , snake_case__ ):
A_ : str = url
elif issubclass(snake_case__ , snake_case__ ):
A_ : Union[str, Any] = [url]
elif issubclass(snake_case__ , snake_case__ ):
A_ : List[Any] = {"""train""": url}
A_ : Tuple = """dummy"""
A_ : List[str] = """downloads"""
A_ : List[str] = tmp_path
A_ : str = DownloadConfig(
cache_dir=os.path.join(snake_case__ , snake_case__ ) , use_etag=snake_case__ , )
A_ : Any = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ )
A_ : Dict = dl_manager.download(snake_case__ )
A_ : List[str] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(snake_case__ , snake_case__ ):
A_ : Optional[Any] = [downloaded_paths]
A_ : Tuple = [urls]
elif isinstance(snake_case__ , snake_case__ ):
assert "train" in downloaded_paths.keys()
A_ : Union[str, Any] = downloaded_paths.values()
A_ : Tuple = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(snake_case__ , snake_case__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A_ : Union[str, Any] = Path(snake_case__ )
A_ : str = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A_ : List[str] = downloaded_path.read_text()
assert content == CONTENT
A_ : Union[str, Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
A_ : List[str] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : str = str(snake_case__ )
if issubclass(snake_case__ , snake_case__ ):
A_ : List[str] = filename
elif issubclass(snake_case__ , snake_case__ ):
A_ : Tuple = [filename]
elif issubclass(snake_case__ , snake_case__ ):
A_ : Dict = {"""train""": filename}
A_ : Union[str, Any] = """dummy"""
A_ : Dict = xz_file.parent
A_ : List[str] = """extracted"""
A_ : int = DownloadConfig(
cache_dir=snake_case__ , use_etag=snake_case__ , )
A_ : str = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ )
A_ : int = dl_manager.extract(snake_case__ )
A_ : int = paths
for extracted_paths in [extracted_paths]:
if isinstance(snake_case__ , snake_case__ ):
A_ : Dict = [extracted_paths]
A_ : str = [paths]
elif isinstance(snake_case__ , snake_case__ ):
assert "train" in extracted_paths.keys()
A_ : Any = extracted_paths.values()
A_ : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(snake_case__ , snake_case__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
A_ : str = Path(snake_case__ )
A_ : List[str] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(snake_case__ , etag=snake_case__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A_ : Optional[Any] = extracted_path.read_text()
A_ : Optional[int] = text_file.read_text()
assert extracted_file_content == expected_file_content
def __UpperCamelCase ( snake_case__ , snake_case__ ):
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(snake_case__ , start=1 ):
A_ : List[str] = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Optional[int] = request.getfixturevalue(snake_case__ )
A_ : List[str] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
_test_jsonl(snake_case__ , snake_case__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : int = request.getfixturevalue(snake_case__ )
A_ : Union[str, Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
_test_jsonl(snake_case__ , snake_case__ )
assert num_tar == 1
assert num_jsonl == 2
def __UpperCamelCase ( snake_case__ ):
A_ : int = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(snake_case__ ) , start=1 ):
assert os.path.basename(snake_case__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
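# The cache layout asserted above keys downloaded files by a hash of their URL.
# A standalone sketch of the idea (plain sha256 here; the real helper also mixes
# in the ETag when one is available and may preserve certain file extensions):
import hashlib

url = "http://www.mocksite.com/file1.txt"
print(hashlib.sha256(url.encode("utf-8")).hexdigest())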
| 480 | 1 |
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("""power must be a positive integer""")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("""differentiate() requires a function as input for func""")
    if not isinstance(position, (float, int)):
        raise ValueError("""differentiate() requires a float as input for position""")
    if not isinstance(order, int):
        raise ValueError("""differentiate() requires an int as input for order""")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
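# The core of the forward-mode autodiff above fits in a few lines: a first-order
# dual number carries a (value, derivative) pair, and the product rule
# (a + a'e)(b + b'e) = ab + (a'b + ab')e with e**2 = 0 does the rest. In miniature:
class MiniDual:
    def __init__(self, value, deriv):
        self.value, self.deriv = value, deriv

    def __mul__(self, other):
        return MiniDual(self.value * other.value,
                        self.deriv * other.value + self.value * other.deriv)

x = MiniDual(3.0, 1.0)   # seed derivative dx/dx = 1
y = x * x                # f(x) = x**2
print(y.value, y.deriv)  # 9.0 6.0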
| 96 |
from __future__ import annotations
def a__ ( voltage: float , current: float , resistance: float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
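# Quick illustration of the solver above (illustrative values): with V = 10 and
# R = 5, passing current=0 asks the function for the missing quantity via I = V / R.
print(a__(voltage=10, current=0, resistance=5))  # {'current': 2.0}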
| 140 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = DPTConfig()
if "large" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = [5, 11, 17, 23]
_SCREAMING_SNAKE_CASE = [2_56, 5_12, 10_24, 10_24]
_SCREAMING_SNAKE_CASE = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 1_50
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """ade20k-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ) ,"""r""" ) )
_SCREAMING_SNAKE_CASE = {int(snake_case__ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ ,snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.model""" ,"""dpt.encoder""" )
if "pretrained.model" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.model""" ,"""dpt.embeddings""" )
if "patch_embed" in name:
_SCREAMING_SNAKE_CASE = name.replace("""patch_embed""" ,"""patch_embeddings""" )
if "pos_embed" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pos_embed""" ,"""position_embeddings""" )
if "attn.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "proj" in name and "project" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""proj""" ,"""projection""" )
if "blocks" in name:
_SCREAMING_SNAKE_CASE = name.replace("""blocks""" ,"""layer""" )
if "mlp.fc1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "norm1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""norm2""" ,"""layernorm_after""" )
if "scratch.output_conv" in name:
_SCREAMING_SNAKE_CASE = name.replace("""scratch.output_conv""" ,"""head""" )
if "scratch" in name:
_SCREAMING_SNAKE_CASE = name.replace("""scratch""" ,"""neck""" )
if "layer1_rn" in name:
_SCREAMING_SNAKE_CASE = name.replace("""layer1_rn""" ,"""convs.0""" )
if "layer2_rn" in name:
_SCREAMING_SNAKE_CASE = name.replace("""layer2_rn""" ,"""convs.1""" )
if "layer3_rn" in name:
_SCREAMING_SNAKE_CASE = name.replace("""layer3_rn""" ,"""convs.2""" )
if "layer4_rn" in name:
_SCREAMING_SNAKE_CASE = name.replace("""layer4_rn""" ,"""convs.3""" )
if "refinenet" in name:
_SCREAMING_SNAKE_CASE = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_SCREAMING_SNAKE_CASE = name.replace(F'refinenet{layer_idx}' ,F'fusion_stage.layers.{abs(layer_idx-4 )}' )
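        # e.g. "refinenet4" -> "fusion_stage.layers.0" and "refinenet1" -> "fusion_stage.layers.3"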
if "out_conv" in name:
_SCREAMING_SNAKE_CASE = name.replace("""out_conv""" ,"""projection""" )
if "resConfUnit1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""resConfUnit1""" ,"""residual_layer1""" )
if "resConfUnit2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""resConfUnit2""" ,"""residual_layer2""" )
if "conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""conv1""" ,"""convolution1""" )
if "conv2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""conv2""" ,"""convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_SCREAMING_SNAKE_CASE = name.replace("""pretrained""" ,"""dpt""" )
if "bn" in name:
_SCREAMING_SNAKE_CASE = name.replace("""bn""" ,"""batch_norm""" )
if "head" in name:
_SCREAMING_SNAKE_CASE = name.replace("""head""" ,"""head.head""" )
if "encoder.norm" in name:
_SCREAMING_SNAKE_CASE = name.replace("""encoder.norm""" ,"""layernorm""" )
if "auxlayer" in name:
_SCREAMING_SNAKE_CASE = name.replace("""auxlayer""" ,"""auxiliary_head.head""" )
return name
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> List[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
_SCREAMING_SNAKE_CASE = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight[: config.hidden_size, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(snake_case__ ,stream=snake_case__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_dpt_config(snake_case__ )
# load original state_dict from URL
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(snake_case__ ,map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
_SCREAMING_SNAKE_CASE = val
# read in qkv matrices
read_in_q_k_v(snake_case__ ,snake_case__ )
# load HuggingFace model
_SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(snake_case__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_SCREAMING_SNAKE_CASE = 4_80 if """ade""" in checkpoint_url else 3_84
_SCREAMING_SNAKE_CASE = DPTImageProcessor(size=snake_case__ )
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(snake_case__ ,return_tensors="""pt""" )
# forward pass
_SCREAMING_SNAKE_CASE = model(**snake_case__ ).logits if """ade""" in checkpoint_url else model(**snake_case__ ).predicted_depth
# Assert logits
_SCREAMING_SNAKE_CASE = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(snake_case__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] ,snake_case__ ,atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] ,snake_case__ )
)
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ ,snake_case__ ) ,organization="""nielsr""" ,commit_message="""Add model""" ,use_temp_dir=snake_case__ ,)
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case__ ,snake_case__ ) ,organization="""nielsr""" ,commit_message="""Add image processor""" ,use_temp_dir=snake_case__ ,)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
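# read_in_q_k_v above slices a fused (3 * hidden, hidden) qkv projection into the
# three separate attention matrices. The slicing in miniature (toy hidden size of 2):
import torch

hidden = 2
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]
print(q.shape, k.shape, v.shape)  # three (2, 2) slices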
| 569 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = ""
__snake_case : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__snake_case : str = None # compression type in fsspec. ex: "gzip"
__snake_case : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self: int , UpperCAmelCase_: str = "" , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: Optional[dict] = None , **UpperCAmelCase_: Any ):
'''simple docstring'''
super().__init__(self , **UpperCAmelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_SCREAMING_SNAKE_CASE = fsspec.open(
UpperCAmelCase_ , mode="""rb""" , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_SCREAMING_SNAKE_CASE = os.path.basename(self.file.path.split("""::""" )[0] )
_SCREAMING_SNAKE_CASE = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
_SCREAMING_SNAKE_CASE = None
@classmethod
def UpperCamelCase ( cls: str , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return super()._strip_protocol(UpperCAmelCase_ ).lstrip("""/""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
_SCREAMING_SNAKE_CASE = {f["""name"""]: f}
def UpperCamelCase ( self: str , UpperCAmelCase_: str ):
'''simple docstring'''
return self.file.open().read()
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: str = "rb" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: int=True , UpperCAmelCase_: Optional[int]=None , **UpperCAmelCase_: Tuple , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._strip_protocol(UpperCAmelCase_ )
if mode != "rb":
raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = "bz2"
__snake_case : List[str] = "bz2"
__snake_case : Optional[int] = ".bz2"
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Union[str, Any] = "gzip"
__snake_case : str = "gzip"
__snake_case : str = ".gz"
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = "lz4"
__snake_case : Any = "lz4"
__snake_case : List[Any] = ".lz4"
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = "xz"
__snake_case : int = "xz"
__snake_case : Dict = ".xz"
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[Any] = "zstd"
__snake_case : List[str] = "zstd"
__snake_case : List[str] = ".zst"
def __init__( self: Any , UpperCAmelCase_: str , UpperCAmelCase_: str = "rb" , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: Optional[dict] = None , UpperCAmelCase_: int = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_: Union[str, Any] , ):
'''simple docstring'''
super().__init__(
fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_SCREAMING_SNAKE_CASE = self.file.__enter__
class __UpperCAmelCase :
def __init__( self: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = file_
def __enter__( self: Dict ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self: Optional[int] , *UpperCAmelCase_: Optional[Any] , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __iter__( self: Optional[int] ):
'''simple docstring'''
return iter(self._file )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return next(self._file )
def __getattr__( self: List[Any] , UpperCAmelCase_: Dict ):
'''simple docstring'''
return getattr(self._file , UpperCAmelCase_ )
def fixed_enter(*UpperCAmelCase_: Dict , **UpperCAmelCase_: List[Any] ):
return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = fixed_enter
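# WrappedFile above is an instance of a general delegation pattern: wrap an object
# whose attributes cannot be patched directly and forward everything else to it.
# The pattern in miniature:
import io

class Wrapper:
    def __init__(self, inner):
        self._inner = inner

    def __getattr__(self, name):
        # reached only for names not defined on the wrapper itself
        return getattr(self._inner, name)

print(Wrapper(io.BytesIO(b"abc")).read())  # b'abc', forwarded to the wrapped BytesIO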
| 569 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__magic_name__ = logging.getLogger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ , a_ , a_ , a_=None ) -> str:
super().__init__(
a_ , question_encoder_tokenizer=a_ , generator_tokenizer=a_ , index=a_ , init_retrieval=a_ , )
_UpperCAmelCase = None
def _a ( self , a_ ) -> int:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_UpperCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCAmelCase = str(distributed_port + 1 )
_UpperCAmelCase = dist.new_group(ranks=a_ , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _a ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def _a ( self , a_ , a_ , a_=torch.floataa ) -> Dict:
_UpperCAmelCase = torch.empty(a_ , dtype=a_ )
dist.scatter(a_ , src=0 , scatter_list=a_ , group=self.process_group )
return target_tensor
def _a ( self ) -> str:
_UpperCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCAmelCase = next((addr for addr in addrs if addr.startswith("e" )) , a_ )
return ifname
def _a ( self , a_ , a_ ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(a_ , a_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a_ )
# distributed training
_UpperCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
_UpperCAmelCase = None
if self._is_main():
_UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(a_ )]
dist.gather(torch.tensor(a_ ) , dst=0 , gather_list=a_ , group=self.process_group )
# scatter logic
_UpperCAmelCase = question_hidden_states.shape[0]
_UpperCAmelCase = []
_UpperCAmelCase = []
if self._is_main():
assert len(a_ ) == world_size
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(torch.cat(a_ ).numpy() , a_ )
_UpperCAmelCase , _UpperCAmelCase = torch.tensor(a_ ), torch.tensor(a_ )
_UpperCAmelCase = self._chunk_tensor(a_ , a_ )
_UpperCAmelCase = self._chunk_tensor(a_ , a_ )
_UpperCAmelCase = self._scattered(a_ , [n_queries, n_docs] , target_type=torch.intaa )
_UpperCAmelCase = self._scattered(a_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(a_ )
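# On the main worker, gathered query batches are split back into per-rank chunks
# before being scattered. The splitting step in isolation (torch.chunk is used here
# for brevity; the retriever's own _chunk_tensor helper may differ in detail):
import torch

gathered = torch.arange(8).reshape(4, 2)  # 4 queries gathered from 2 ranks
chunks = torch.chunk(gathered, 2, dim=0)
print([tuple(c.shape) for c in chunks])   # [(2, 2), (2, 2)]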
| 657 |
"""simple docstring"""
def __lowerCamelCase ( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
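# The helper above recognises automorphic numbers, whose square ends in the number
# itself (function name as it appears in the snippet; values are illustrative):
print(__lowerCamelCase(76))  # True, since 76 ** 2 == 5776
print(__lowerCamelCase(7))   # False, since 7 ** 2 == 49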
| 657 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ =[
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
SCREAMING_SNAKE_CASE__ =os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ ={
"""do_resize""": True,
"""size""": {"""height""": 2_2_4, """width""": 2_2_4},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 1_8, """width""": 1_8},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
SCREAMING_SNAKE_CASE__ =os.path.join(self.tmpdirname ,_UpperCamelCase )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(_UpperCamelCase ,_UpperCamelCase )
def __A ( self : Any ,**_UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**_UpperCamelCase )
def __A ( self : Optional[int] ,**_UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**_UpperCamelCase )
def __A ( self : Optional[int] ,**_UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**_UpperCamelCase )
def __A ( self : Any ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ =[Image.fromarray(np.moveaxis(_UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,_UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,_UpperCamelCase )
def __A ( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ =self.get_tokenizer(cls_token="""(CLS)""" ,sep_token="""(SEP)""" )
SCREAMING_SNAKE_CASE__ =self.get_image_processor(do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname ,cls_token="""(CLS)""" ,sep_token="""(SEP)""" ,do_normalize=_UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_UpperCamelCase )
def __A ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ =image_processor(_UpperCamelCase ,return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ =processor(images=_UpperCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ="""Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ =processor(text=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __A ( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ="""Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ =processor(text=_UpperCamelCase ,images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def __A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ =processor.batch_decode(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
def __A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_image_processor()
SCREAMING_SNAKE_CASE__ =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ =ChineseCLIPProcessor(tokenizer=_UpperCamelCase ,image_processor=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ="""Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ =processor(text=_UpperCamelCase ,images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 588 |
def UpperCAmelCase_ ( number_of_steps ):
    assert (
        isinstance(number_of_steps, int ) and number_of_steps > 0
    ), f"""number_of_steps needs to be a positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
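# The recurrence above counts the distinct ways to climb n stairs taking 1 or 2
# steps at a time, i.e. a Fibonacci sequence (function name as in the snippet):
print(UpperCAmelCase_(3))  # 3: 1+1+1, 1+2, 2+1
print(UpperCAmelCase_(5))  # 8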
| 588 | 1 |
"""simple docstring"""
import os
import sys
lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase__ = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def snake_case_ ( *A_ : Tuple, **A_ : Any ):
'''simple docstring'''
return AutoConfig.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def snake_case_ ( *A_ : Union[str, Any], **A_ : List[str] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def snake_case_ ( *A_ : Optional[Any], **A_ : Optional[int] ):
'''simple docstring'''
return AutoModel.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def snake_case_ ( *A_ : Any, **A_ : str ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def snake_case_ ( *A_ : Optional[Any], **A_ : Tuple ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def snake_case_ ( *A_ : List[Any], **A_ : Union[str, Any] ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def snake_case_ ( *A_ : List[Any], **A_ : Any ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*_UpperCAmelCase, **_UpperCAmelCase )
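# add_start_docstrings above prepends the wrapped Auto* class's documentation to
# each thin shim. The decorator pattern in miniature (a sketch, not the library code):
def with_doc(doc):
    def wrap(fn):
        fn.__doc__ = (doc or "") + (fn.__doc__ or "")
        return fn
    return wrap

@with_doc("Shared preamble. ")
def helper():
    """Specific details."""

print(helper.__doc__)  # Shared preamble. Specific details.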
| 83 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Tuple = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
lowerCamelCase__ : Any = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : Tuple = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , UpperCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ : Optional[Any] = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 295 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = "roberta-prelayernorm"
def __init__( self , UpperCAmelCase__=5_0265 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=1 , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__="absolute" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 705 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = 1
@register_to_config
def __init__( self , UpperCAmelCase__=2000 , UpperCAmelCase__=0.1 , UpperCAmelCase__=20 , UpperCAmelCase__=1e-3 ):
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase__ , device=UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE__ = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE__ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE__ = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE__ = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = -score / std
# compute
SCREAMING_SNAKE_CASE__ = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE__ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE__ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE__ = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE__ = torch.sqrt(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE__ = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE__ = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase__ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE__ = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
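# The step method above performs one Euler-Maruyama update of the reverse-time SDE:
# x_mean = x + drift * dt, then x = x_mean + diffusion * sqrt(|dt|) * noise.
# The update in miniature (toy scalar coefficients, chosen for illustration):
import math
import random

x, dt = 1.0, -0.01                # reverse time, so dt < 0
drift, diffusion = -0.5 * x, 0.3  # illustrative values
x_mean = x + drift * dt
x = x_mean + diffusion * math.sqrt(-dt) * random.gauss(0.0, 1.0)
print(x_mean, x)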
| 112 | 0 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
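# Brute-force cross-check of the recursion above (standalone sketch): a number is
# "reversible" when n + reverse(n) uses only odd digits and n has no trailing zero.
def _is_reversible_bruteforce(n: int) -> bool:
    reversed_n = int(str(n)[::-1])
    return n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + reversed_n))

# sum(_is_reversible_bruteforce(n) for n in range(1, 1000)) == 120, matching solution(3)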
if __name__ == "__main__":
print(F"""{solution() = }""") | 91 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
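# A minimal sketch (illustrative only, not part of this script) of how the variation
# dimensions above expand into the cartesian product:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#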
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` chars."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key,
    report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main() | 91 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 12 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 12 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 22 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
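# Worked examples: gamma(4) == 3 * gamma(3) == 3 * 2 * gamma(2) == 6 (i.e. 3!),
# and gamma(1.5) == 0.5 * gamma(0.5) == 0.5 * sqrt(pi).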
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
print('\nEnter 0 to exit...')
| 22 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    # By Nicomachus's theorem the sum of the first n cubes equals the square of the
    # n-th triangular number, hence the name `sum_cubes` for (n * (n + 1) // 2) ** 2.
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 104 | """simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; priority 0 is dequeued first."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue that always dequeues the smallest element first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 104 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
A: Tuple = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
A: Tuple = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
A: Any = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 160 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
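# Worked example:
#   text_justification("This is an example of text justification.", 16)
#   returns ['This    is    an', 'example  of text', 'justification.  ']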
if __name__ == "__main__":
from doctest import testmod
testmod()
| 160 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
return batch | 707 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
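# Example chains (from the problem statement):
#   44 -> 32 -> 13 -> 10 -> 1 -> 1
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89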
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 (chain ends at 1)
CHAINS[57] = False  # 58 (chain ends at 89)
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # False marks chains that arrive at 89, which is what the problem counts
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }') | 298 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 62 |
'''simple docstring'''
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
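# e.g. partition(5) == 7, matching the seven partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1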
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 721 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 588 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
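# e.g. mobius(6) == 1 (6 = 2 * 3 is square-free with an even number of prime factors),
# mobius(7) == -1 (one prime factor), and mobius(12) == 0 (12 = 2**2 * 3 is not square-free).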
if __name__ == "__main__":
import doctest
doctest.testmod() | 557 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 490 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
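# A minimal round-trip sketch (illustrative only, not part of the pipeline):
#
#   x = torch.rand(1, 3, 8, 8)      # image tensor in [0, 1]
#   b = decimal_to_bits(x)          # -> shape (1, 24, 8, 8), values in {-1, 1}
#   y = bits_to_decimal(b)          # back to [0, 1], quantized to 1/255 steps
#   assert torch.allclose(x, y, atol=1 / 255 + 1e-6)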
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
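# Recap of the update implemented above (eq. (12) of the DDIM paper), added as
# a reading aid: with x̂_0 the clipped "predicted x_0" and ε̂ the (possibly
# re-derived) noise prediction,
#   x_{t-1} = sqrt(ᾱ_{t-1}) · x̂_0 + sqrt(1 − ᾱ_{t-1} − σ_t²) · ε̂ + σ_t · z,
# where z ~ N(0, I) is only added when eta > 0.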
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # patch the scheduler instance with the bit-aware step defined above
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
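# Minimal usage sketch (added; not part of the original file). The model
# wiring below is hypothetical — any UNet trained on bit-encoded images plus a
# DDIMScheduler or DDPMScheduler should work:
# unet = UNet2DConditionModel(...)          # trained on bit-encoded images
# scheduler = DDIMScheduler()
# pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
# image = pipe(height=64, width=64, num_inference_steps=50).images[0]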
| 680 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
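# Worked example (added): 25 is 0b11001, so both functions return 3.
# Kernighan's loop clears one set bit per iteration:
#   0b11001 & 0b11000 = 0b11000
#   0b11000 & 0b10111 = 0b10000
#   0b10000 & 0b01111 = 0b00000   -> 3 iterations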
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 680 | 1 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification Head for transformer encoders"""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
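# Illustrative usage (added): project a 768-dim hidden state onto 5 classes.
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))   # -> shape (2, 5); requires `import torch`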
| 93 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
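# Illustrative usage (added): encode an image/text pair. The checkpoint id is
# an assumption and `image` is assumed to be a PIL.Image instance.
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# -> keys: input_ids, attention_mask, pixel_values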
| 294 | 0 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
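# Illustrative usage (added): toy checksum dicts in the shape produced by
# get_size_checksum_dict; the values here are fabricated for the example only.
# expected = {"http://host/a.txt": {"num_bytes": 10, "checksum": "abc"}}
# recorded = {"http://host/a.txt": {"num_bytes": 10, "checksum": "abc"}}
# verify_checksums(expected, recorded)   # logs "All the checksums matched successfully"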
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
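# How the lazy module behaves (added note): under TYPE_CHECKING the real
# symbols are imported for static analysis only; at runtime the module object
# is swapped for a _LazyModule, so e.g.
#   from transformers.models.table_transformer import TableTransformerConfig
# only triggers the heavy torch-dependent imports on first attribute access.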
| 194 | 1 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-element combinations of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
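# Worked example (added): generate_all_combinations(4, 2) yields
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] — the 4C2 = 6 combinations.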
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 625 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
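# Minimal init sketch (added; not part of the original module): input shapes
# follow init_weights above, and the PRNG seed is arbitrary.
# model = FlaxControlNetModel()
# params = model.init_weights(jax.random.PRNGKey(0))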
| 625 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for a given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        current_row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(current_row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
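# Worked example (added): for row_index = 4, temp_row = [0, 1, 3, 3, 1, 0] and
# distinct_elements = 3, so row_first_half = [1, 4, 6]; mirroring its first
# (4 + 1) // 2 = 2 entries gives [1, 4, 6, 4, 1] — the fifth row.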
def benchmark() -> None:
    """Benchmark the two generators against each other for a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 717 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
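# Layout note (added): PyTorch's nn.MultiheadAttention stores q/k/v as one
# stacked in_proj matrix of shape (3 * hidden, hidden); with hidden size 256
# the rows [:256], [256:512] and [-256:] are the query, key and value blocks.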
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()

    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 268 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
        if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
        the maximum acceptable input length for the model if that argument is not provided. This will truncate
        token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
        of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided. This will only truncate the first
        sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
        acceptable input length for the model if that argument is not provided. This will only truncate the
        second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if text is None and titles is None and texts is None:
            pass  # unreachable guard removed; the cases below cover all inputs
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts." )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
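    # Toy example (added): with start_logits = [5, 0], end_logits = [0, 4] and
    # max_answer_length = 2, the scored spans are (0, 0) -> 5, (0, 1) -> 9 and
    # (1, 1) -> 4; (0, 1) is chosen first and the other two are skipped because
    # they overlap it.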
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 367 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 367 | 1 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class snake_case_ :
"""simple docstring"""
def __init__( self ,lowercase ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase_ : str = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase_ : str = queue
# current time
UpperCAmelCase_ : int = current_time
# finished process is in this sequence queue
UpperCAmelCase_ : deque[Process] = deque()
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : int = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : Dict = []
for i in range(len(lowercase)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowercase)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for i in range(len(lowercase)):
completion_times.append(queue[i].stop_time)
return completion_times
def A_ ( self ,lowercase):
"""simple docstring"""
return [q.burst_time for q in queue]
def A_ ( self ,lowercase):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : deque[Process] = deque() # sequence deque of finished process
while len(lowercase) != 0:
UpperCAmelCase_ : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase_ : Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCAmelCase_ : Union[str, Any] = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase_ : List[str] = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase)
self.finish_queue.extend(lowercase) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def A_ ( self ,lowercase ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase)):
UpperCAmelCase_ : int = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase_ : List[str] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowercase)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase_ : Dict = 0
# set the finish time
UpperCAmelCase_ : Optional[int] = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase_ : Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase)
self.finish_queue.extend(lowercase) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def A_ ( self):
"""simple docstring"""
for i in range(self.number_of_queues - 1):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.round_robin(
self.ready_queue ,self.time_slices[i])
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
if __name__ == "__main__":
import doctest
__lowerCamelCase = Process('''P1''', 0, 53)
__lowerCamelCase = Process('''P2''', 0, 17)
__lowerCamelCase = Process('''P3''', 0, 68)
__lowerCamelCase = Process('''P4''', 0, 24)
__lowerCamelCase = 3
__lowerCamelCase = [17, 25]
__lowerCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__lowerCamelCase = Process('''P1''', 0, 53)
__lowerCamelCase = Process('''P2''', 0, 17)
__lowerCamelCase = Process('''P3''', 0, 68)
__lowerCamelCase = Process('''P4''', 0, 24)
__lowerCamelCase = 3
__lowerCamelCase = [17, 25]
__lowerCamelCase = deque([Pa, Pa, Pa, Pa])
__lowerCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
__lowerCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
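    # Illustrative sketch (added; not part of the original module) of a single
    # round-robin pass. The process names and numbers below are made up.
    demo_queue = deque([Process("A", 0, 10), Process("B", 0, 3)])
    demo_mlfq = MLFQ(2, [4], demo_queue, 0)
    finished_demo, remaining_demo = demo_mlfq.round_robin(demo_mlfq.ready_queue, 4)
    # "B" (burst 3 <= slice 4) finishes in the first pass; "A" keeps 10 - 4 = 6 units.
    print([p.process_name for p in finished_demo])  # ['B']
    print(demo_mlfq.calculate_remaining_burst_time_of_processes(remaining_demo))  # [6]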
| 455 |
import operator


def strand_sort(arr, reverse=False, solution=None):
    """Strand sort: repeatedly pull an ordered "strand" out of the input and
    merge it into the solution list.

    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # pull an ordered sublist (a "strand") out of the remaining items
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merge the sublist into the solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
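    # Extra check (added for illustration): strand sort is comparison-based and
    # works on any orderable items, e.g. strings.
    assert strand_sort(["pear", "apple", "mango"]) == ["apple", "mango", "pear"]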
| 455 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
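# Minimal usage sketch (added for illustration; needs network access to fetch
# the pretrained files from the Hugging Face Hub):
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   encoding = tokenizer("Hello world", return_offsets_mapping=True)
#   print(encoding["input_ids"])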
| 302 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the encoding method, holding the latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with KL loss for encoding images to latents
    and decoding latents back to images."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # into the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # into the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
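# Rough usage sketch (added for illustration; the tiny sizes are arbitrary
# choices to keep the example fast, not values from the original file):
#
#   vae = AutoencoderKL(block_out_channels=(32,), sample_size=64)
#   vae.enable_tiling()  # tiled encode/decode kicks in above sample_size
#   x = torch.randn(1, 3, 128, 128)
#   reconstruction = vae(x, sample_posterior=True).sample  # same shape as x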
| 302 | 1 |
"""simple docstring"""
def lowercase ( A_ )-> int:
'''simple docstring'''
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
a : Optional[int] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowercase ( A_ )-> List[Any]:
'''simple docstring'''
a : Tuple = 0
a : Any = 2
while digits < n:
index += 1
a : str = len(str(fibonacci(lowerCamelCase_ ) ) )
return index
def lowercase ( A_ = 1_000 )-> Optional[int]:
'''simple docstring'''
return fibonacci_digits_index(lowerCamelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
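    # Sanity check (added for illustration): F(12) = 144 is the first Fibonacci
    # number with three digits, so the index for n = 3 should be 12.
    assert fibonacci_digits_index(3) == 12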
| 702 |
"""simple docstring"""
import numpy as np
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
a : List[str] = int(np.ceil((x_end - xa) / h ) )
a : Optional[int] = np.zeros((n + 1,) )
a : Tuple = ya
a : Union[str, Any] = xa
for k in range(A_ ):
a : Any = f(A_ , y[k] )
a : List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
a : str = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
a : Union[str, Any] = f(x + h , y[k] + h * ka )
a : Dict = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
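    # Worked example (added for illustration): integrate y' = y with y(0) = 1 up
    # to x = 1 with step 0.01; the RK4 estimate should be very close to e.
    approx_e = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    print(f"RK4 estimate of e: {approx_e}")  # ~2.718281828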
| 135 | 0 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of the Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with four segments forming the Koch motif."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
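    # Sanity check (added for illustration): each iteration replaces every one of
    # the 3 initial segments with 4, so after n steps the curve has 3 * 4**n
    # segments and hence 3 * 4**n + 1 points.
    assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1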
| 538 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 538 | 1 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Invert every pixel: each channel value v becomes 255 - v."""
    # get the number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # convert each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
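    # Self-contained check (added for illustration): on a synthetic 1x1 image,
    # the negative of pure black is pure white.
    import numpy as np

    black = np.zeros((1, 1, 3), dtype=np.uint8)
    assert (convert_to_negative(black) == 255).all()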
| 704 |
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 418 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Write the dataset to the file object in batches and return the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_bytes)

        return written
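# Usage sketch (added for illustration; the file name is made up):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()  # JSON Lines output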
| 372 |
def heaps(arr: list) -> list:
    """Return all permutations of arr using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
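    # Sanity check (added for illustration): Heap's algorithm enumerates all
    # n! permutations exactly once.
    assert len(heaps([1, 2, 3])) == 6
    assert len(set(heaps([1, 2, 3]))) == 6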
| 372 | 1 |
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Liouville's lambda function: 1 if number has an even count of prime
    factors (with multiplicity), -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
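    # Quick examples (added for illustration): lambda(1) = 1 (zero prime factors),
    # lambda(2) = -1 (one factor), lambda(4) = 1 (2 * 2, an even count).
    print([liouville_lambda(n) for n in (1, 2, 4)])  # [1, -1, 1]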
| 534 |
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # convert YOLO-style (center, size) boxes to corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 534 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 670 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__ : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = BartphoTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = True
def lowercase_ ( self ) -> int:
"""simple docstring"""
super().setUp()
_lowercase: Optional[Any] = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
_lowercase: List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
_lowercase: Optional[int] = {'''unk_token''': '''<unk>'''}
_lowercase: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
_lowercase: Dict = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **A_ ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowercase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: Tuple = '''This is a là test'''
_lowercase: List[Any] = '''This is a<unk><unk> test'''
return input_text, output_text
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Tuple = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map )
_lowercase: int = '''This is a là test'''
_lowercase: Any = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
_lowercase: str = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_lowercase: Optional[Any] = tokens + [tokenizer.unk_token]
_lowercase: Dict = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
| 272 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 272 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class lowercase__ ( A_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None:
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
| 88 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""

        if os.getenv("HF_ALLOW_CODE_EVAL", "0") != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
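# Worked example of the unbiased pass@k estimator above (a minimal sketch, not
# part of the original module). With n = 5 generated samples and c = 2 correct
# ones, pass@1 reduces to the empirical success rate c / n = 0.4, while pass@5
# is 1.0 because any draw of all 5 samples must include a correct one.
if __name__ == "__main__":
    total = np.array([5, 5])    # samples generated per problem
    correct = np.array([2, 0])  # samples that passed the unit tests
    print(estimate_pass_at_k(total, correct, 1))  # -> [0.4 0. ]
    print(estimate_pass_at_k(total, correct, 5))  # -> [1.  0. ]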
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays of shape (channels, height, width)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
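# Minimal usage sketch for CLIPProcessor outside the test harness (illustrative;
# "openai/clip-vit-base-patch32" is a public checkpoint used here only as an
# example and requires a download):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.new("RGB", (224, 224))
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#     # batch contains input_ids, attention_mask and pixel_values, ready for CLIPModel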
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
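# Example invocation of this script (illustrative only; the task name, data
# path, model checkpoint and output directory are placeholders that must exist
# locally):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_output \
#       --max_seq_length 80 \
#       --per_device_train_batch_size 16 \
#       --do_train --do_eval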
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Freeze a module's parameters so they are excluded from backpropagation."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    """Display a PIL image with matplotlib, hiding both axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
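# Quick usage sketch for the helpers above (illustrative; the Linear layer is
# just a stand-in for any torch module one might want to freeze):
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 4)
    freeze_module(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(f"[{get_timestamp()}] running on device: {get_device()}")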
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs: a (list of) dict(s) with "image" and "question" keys.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
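# Note on the lazy-module pattern above (a short behavioural sketch, not part of
# the original file): at import time the module body never pulls in torch or
# TensorFlow; attribute access goes through _LazyModule, which resolves the
# relevant submodule on first touch. Roughly:
#
#     import transformers.models.speech_to_text as s2t  # cheap, no heavy deps loaded
#     cfg = s2t.Speech2TextConfig()  # first attribute access triggers the real import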
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
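# Small numeric sketch (not part of the original test file) of why the
# assertions above expect doc "1" first: the dummy index scores by inner
# product, the query is a vector of ones, and the second document's embedding
# (all 2s) yields the larger score.
if __name__ == "__main__":
    embeddings = np.stack([np.ones(8), 2 * np.ones(8)])  # docs "0" and "1"
    query = np.ones(8)
    print(embeddings @ query)  # -> [ 8. 16.]
    print(int(np.argmax(embeddings @ query)))  # -> 1, i.e. doc "1" ranks first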
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase = " " ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Dict = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
lowerCAmelCase_ : Optional[int] = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
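# Edge-case sketch (not part of the original doctests): a trailing separator is
# silently dropped by this implementation, because the final-character branch
# only fires for non-separator characters.
#
#     split("a#b#", separator="#")  # -> ['a', 'b'], not ['a', 'b', '']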
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting ``task_guide``, formatted as markdown doc links.
    """
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Checks the model list in a task guide's generated tip and updates it if needed."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
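# Minimal usage sketch for _find_text_in_file (illustrative; "translation.md" is
# just one example task guide): it returns the generated tip found between the
# two HTML comment markers, plus the line indices needed to splice in a
# replacement.
#
#     tip, start, end, lines = _find_text_in_file(
#         filename=os.path.join(PATH_TO_TASK_GUIDES, "translation.md"),
#         start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
#         end_prompt="<!--End of the generated tip-->",
#     )
#     updated = lines[:start] + [get_model_list_for_task("translation.md")] + lines[end:]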
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
snake_case_ = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
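# Shape sketch for the helpers above (illustrative): ids_tensor((2, 5), vocab_size=99)
# returns a (2, 5) int32 array with entries in [0, 98], while
# random_attention_mask((2, 5)) additionally forces the last column to 1 so that
# every row attends to at least one token.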
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def lowerCAmelCase_( lowercase_ : Any ) -> str:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ) -> Optional[int]:
if args.student_type == "roberta":
_lowerCamelCase = False
elif args.student_type == "gpt2":
_lowerCamelCase = False
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Tuple:
if args.student_type == "roberta":
_lowerCamelCase = False
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=40_00 , help='''Checkpoint interval.''' )
_lowerCamelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = MODEL_CLASSES[args.student_type]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase = tokenizer.all_special_tokens.index(snake_case_ )
_lowerCamelCase = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
_lowerCamelCase = special_tok_ids
_lowerCamelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
_lowerCamelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
_lowerCamelCase = pickle.load(snake_case_ )
_lowerCamelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCamelCase = 0.0 # do not predict special tokens
_lowerCamelCase = torch.from_numpy(snake_case_ )
else:
_lowerCamelCase = None
_lowerCamelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
_lowerCamelCase = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_lowerCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
_lowerCamelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
_lowerCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
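
# Illustrative invocation (hedged sketch): every flag below is defined by the
# argparse parser above, but the data files, config path, and dump directory
# are placeholders, not artifacts shipped with this script.
#
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#     --data_file data/binarized_text.pickle \
#     --token_counts data/token_counts.pickle \
#     --dump_path serialization_dir/distilbert_run --force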
| 661 | '''simple docstring'''
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Pad the bottom and right of the image so that height and width become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
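
# Added example (hedged sketch, not part of the original module): the
# pad-to-multiple arithmetic used in `pad` above, isolated for one dimension.
# Note that an input already divisible by `size` still gains a full extra
# block, since (length // size + 1) * size - length == size in that case.
def _demo_pad_amount() -> None:
    def pad_amount(length: int, size: int) -> int:
        return (length // size + 1) * size - length

    assert pad_amount(517, 8) == 3  # 517 -> 520
    assert pad_amount(253, 8) == 3  # 253 -> 256
    assert pad_amount(256, 8) == 8  # already a multiple, still padded by 8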
| 78 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """
    Fast polynomial multiplication using a radix-2 fast Fourier transform.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for printing; shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
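
# Added usage sketch (hedged): multiply A(x) = 1 + 2x + 3x^2 by B(x) = 3 + 4x.
# Coefficients are listed lowest degree first, and the product should be
# 3 + 10x + 17x^2 + 12x^3.
#
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[3, 4])
#   print(fft.product)  # [(3+0j), (10+0j), (17+0j), (12+0j)]
#   print(fft)          # pretty-prints A, B and A*B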
| 398 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
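
# Added usage sketch (hedged; the values are illustrative, not tuned):
#
#   config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   assert config.d_model == 64  # architecture defaults are unchanged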
| 398 | 1 |
"""Finetuning the library models for multiple choice (Bert, Roberta, XLNet)."""


import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
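
# Illustrative invocation (hedged sketch): "swag" and the paths below are
# placeholders; the tasks actually available are the keys of `processors`
# in utils_multiple_choice.
#
#   python run_multiple_choice.py \
#     --task_name swag --data_dir ./data/swag \
#     --model_name_or_path bert-base-uncased \
#     --output_dir ./swag_output --do_train --do_eval \
#     --max_seq_length 128 --per_device_train_batch_size 16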
| 161 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` starting from the point `a`, using Newton's
    update rule x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (the root of sin(x) = 0 near 2 is pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e, the root of log(x) - 1 = 0
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 506 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCamelCase ( __lowerCamelCase ):
lowercase = 42
lowercase = jnp.floataa
lowercase = True
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
super().setup()
lowercase_ : int = nn.Dense(5 ,dtype=self.dtype )
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : int = super().__call__(*UpperCamelCase_ ,**UpperCamelCase_ )
lowercase_ : Optional[int] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCamelCase ( __lowerCamelCase ):
lowercase = FlaxBigBirdForNaturalQuestionsModule
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
def cross_entropy(__SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple=None ):
lowercase_ : List[Any] = logits.shape[-1]
lowercase_ : Optional[int] = (labels[..., None] == jnp.arange(A__ )[None]).astype('f4' )
lowercase_ : List[str] = jax.nn.log_softmax(A__ , axis=-1 )
lowercase_ : Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase_ : Union[str, Any] = reduction(A__ )
return loss
lowercase_ : Tuple = partial(A__ , reduction=jnp.mean )
lowercase_ : List[Any] = cross_entropy(A__ , A__ )
lowercase_ : Any = cross_entropy(A__ , A__ )
lowercase_ : int = cross_entropy(A__ , A__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase :
lowercase = "google/bigbird-roberta-base"
lowercase = 3_0_0_0
lowercase = 1_0_5_0_0
lowercase = 1_2_8
lowercase = 3
lowercase = 1
lowercase = 5
# tx_args
lowercase = 3e-5
lowercase = 0.0
lowercase = 2_0_0_0_0
lowercase = 0.0_095
lowercase = "bigbird-roberta-natural-questions"
lowercase = "training-expt"
lowercase = "data/nq-training.jsonl"
lowercase = "data/nq-validation.jsonl"
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
os.makedirs(self.base_dir ,exist_ok=UpperCamelCase_ )
lowercase_ : Dict = os.path.join(self.base_dir ,self.save_dir )
lowercase_ : List[Any] = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase :
lowercase = 42
lowercase = 4_0_9_6 # no dynamic padding on TPUs
def __call__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = self.collate_fn(UpperCamelCase_ )
lowercase_ : Optional[Any] = jax.tree_util.tree_map(UpperCamelCase_ ,UpperCamelCase_ )
return batch
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ : List[str] = self.fetch_inputs(features['input_ids'] )
lowercase_ : List[str] = {
'input_ids': jnp.array(UpperCamelCase_ ,dtype=jnp.intaa ),
'attention_mask': jnp.array(UpperCamelCase_ ,dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'] ,dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'] ,dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'] ,dtype=jnp.intaa ),
}
return batch
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = [self._fetch_inputs(UpperCamelCase_ ) for ids in input_ids]
return zip(*UpperCamelCase_ )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = [1 for _ in range(len(UpperCamelCase_ ) )]
while len(UpperCamelCase_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=None ):
if seed is not None:
lowercase_ : Dict = dataset.shuffle(seed=A__ )
for i in range(len(A__ ) // batch_size ):
lowercase_ : Tuple = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(A__ )
@partial(jax.pmap , axis_name='batch' )
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any ):
def loss_fn(__SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : int = model_inputs.pop('start_labels' )
lowercase_ : int = model_inputs.pop('end_labels' )
lowercase_ : Tuple = model_inputs.pop('pooled_labels' )
lowercase_ : Dict = state.apply_fn(**A__ , params=A__ , dropout_rng=A__ , train=A__ )
lowercase_ , lowercase_ , lowercase_ : int = outputs
return state.loss_fn(
A__ , A__ , A__ , A__ , A__ , A__ , )
lowercase_ , lowercase_ : Union[str, Any] = jax.random.split(A__ )
lowercase_ : int = jax.value_and_grad(A__ )
lowercase_ , lowercase_ : str = grad_fn(state.params )
lowercase_ : Union[str, Any] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
lowercase_ : int = jax.lax.pmean(A__ , 'batch' )
lowercase_ : List[Any] = state.apply_gradients(grads=A__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def lowercase__( __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Tuple ):
lowercase_ : Any = model_inputs.pop('start_labels' )
lowercase_ : Dict = model_inputs.pop('end_labels' )
lowercase_ : Dict = model_inputs.pop('pooled_labels' )
lowercase_ : Union[str, Any] = state.apply_fn(**A__ , params=state.params , train=A__ )
lowercase_ , lowercase_ , lowercase_ : str = outputs
lowercase_ : Dict = state.loss_fn(A__ , A__ , A__ , A__ , A__ , A__ )
lowercase_ : List[str] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class UpperCamelCase ( train_state.TrainState ):
lowercase = struct.field(pytree_node=__lowerCamelCase )
@dataclass
class UpperCamelCase :
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = None
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[str]:
'''simple docstring'''
lowercase_ : Tuple = model.params
lowercase_ : Optional[int] = TrainState.create(
apply_fn=model.__call__ ,params=UpperCamelCase_ ,tx=UpperCamelCase_ ,loss_fn=UpperCamelCase_ ,)
if ckpt_dir is not None:
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = restore_checkpoint(UpperCamelCase_ ,UpperCamelCase_ )
lowercase_ : Optional[int] = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
lowercase_ , lowercase_ : List[Any] = build_tx(**UpperCamelCase_ )
lowercase_ : Optional[Any] = train_state.TrainState(
step=UpperCamelCase_ ,apply_fn=model.__call__ ,params=UpperCamelCase_ ,tx=UpperCamelCase_ ,opt_state=UpperCamelCase_ ,)
lowercase_ : Tuple = args
lowercase_ : List[str] = data_collator
lowercase_ : List[str] = lr
lowercase_ : List[str] = params
lowercase_ : List[Any] = jax_utils.replicate(UpperCamelCase_ )
return state
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.args
lowercase_ : str = len(UpperCamelCase_ ) // args.batch_size
lowercase_ : List[Any] = jax.random.PRNGKey(0 )
lowercase_ : Dict = jax.random.split(UpperCamelCase_ ,jax.device_count() )
for epoch in range(args.max_epochs ):
lowercase_ : List[str] = jnp.array(0 ,dtype=jnp.floataa )
lowercase_ : Dict = get_batched_dataset(UpperCamelCase_ ,args.batch_size ,seed=UpperCamelCase_ )
lowercase_ : List[Any] = 0
for batch in tqdm(UpperCamelCase_ ,total=UpperCamelCase_ ,desc=f'''Running EPOCH-{epoch}''' ):
lowercase_ : str = self.data_collator(UpperCamelCase_ )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.train_step_fn(UpperCamelCase_ ,UpperCamelCase_ ,**UpperCamelCase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
lowercase_ : Union[str, Any] = jax_utils.unreplicate(state.step )
lowercase_ : Any = running_loss.item() / i
lowercase_ : Union[str, Any] = self.scheduler_fn(state_step - 1 )
lowercase_ : Union[str, Any] = self.evaluate(UpperCamelCase_ ,UpperCamelCase_ )
lowercase_ : List[Any] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(UpperCamelCase_ ) )
self.logger.log(UpperCamelCase_ ,commit=UpperCamelCase_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' ,state=UpperCamelCase_ )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = get_batched_dataset(UpperCamelCase_ ,self.args.batch_size )
lowercase_ : Dict = len(UpperCamelCase_ ) // self.args.batch_size
lowercase_ : Optional[int] = jnp.array(0 ,dtype=jnp.floataa )
lowercase_ : str = 0
for batch in tqdm(UpperCamelCase_ ,total=UpperCamelCase_ ,desc='Evaluating ... ' ):
lowercase_ : List[str] = self.data_collator(UpperCamelCase_ )
lowercase_ : Any = self.val_step_fn(UpperCamelCase_ ,**UpperCamelCase_ )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = jax_utils.unreplicate(UpperCamelCase_ )
print(f'''SAVING CHECKPOINT IN {save_dir}''' ,end=' ... ' )
self.model_save_fn(UpperCamelCase_ ,params=state.params )
with open(os.path.join(UpperCamelCase_ ,'opt_state.msgpack' ) ,'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args ,os.path.join(UpperCamelCase_ ,'args.joblib' ) )
joblib.dump(self.data_collator ,os.path.join(UpperCamelCase_ ,'data_collator.joblib' ) )
with open(os.path.join(UpperCamelCase_ ,'training_state.json' ) ,'w' ) as f:
json.dump({'step': state.step.item()} ,UpperCamelCase_ )
print('DONE' )
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] ):
print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
with open(os.path.join(A__ , 'flax_model.msgpack' ) , 'rb' ) as f:
lowercase_ : Optional[int] = from_bytes(state.params , f.read() )
with open(os.path.join(A__ , 'opt_state.msgpack' ) , 'rb' ) as f:
lowercase_ : Tuple = from_bytes(state.opt_state , f.read() )
lowercase_ : Dict = joblib.load(os.path.join(A__ , 'args.joblib' ) )
lowercase_ : str = joblib.load(os.path.join(A__ , 'data_collator.joblib' ) )
with open(os.path.join(A__ , 'training_state.json' ) , 'r' ) as f:
lowercase_ : List[str] = json.load(A__ )
lowercase_ : List[str] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ):
lowercase_ : str = num_train_steps - warmup_steps
lowercase_ : Optional[int] = optax.linear_schedule(init_value=A__ , end_value=A__ , transition_steps=A__ )
lowercase_ : str = optax.linear_schedule(init_value=A__ , end_value=1E-7 , transition_steps=A__ )
lowercase_ : Tuple = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ):
def weight_decay_mask(__SCREAMING_SNAKE_CASE : str ):
lowercase_ : Any = traverse_util.flatten_dict(A__ )
lowercase_ : Tuple = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(A__ )
lowercase_ : Optional[int] = scheduler_fn(A__ , A__ , A__ , A__ )
lowercase_ : Union[str, Any] = optax.adamw(learning_rate=A__ , weight_decay=A__ , mask=A__ )
return tx, lr
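
# Added example (hedged sketch, not part of the original script): the schedule
# built by `scheduler_fn` ramps linearly from `init_lr` to `lr` over
# `warmup_steps`, then decays linearly towards 1e-7 for the remaining steps.
def _demo_schedule() -> None:
    sched = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
    assert float(sched(0)) == 0.0                # start of warmup
    assert abs(float(sched(100)) - 3e-5) < 1e-9  # peak at the end of warmup
    assert float(sched(550)) < 3e-5              # linear decay afterwards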
| 714 | """simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Applies the sigmoid (logistic) function element-wise: 1 / (1 + e^(-x)).

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
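
# Added example (hedged sketch, not part of the original file): a convenient
# identity for backpropagation is sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
def sigmoid_derivative(vector: np.ndarray) -> np.ndarray:
    s = sigmoid(vector)
    return s * (1 - s)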
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 477 | 0 |
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
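
# Added usage sketch (hedged; "gpt2" is just an illustrative checkpoint):
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
#   print(outputs[0]["generated_text"])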
| 87 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int) -> None:
    """
    Creates a state space tree and iterates through each branch with DFS,
    backtracking as soon as a branch can no longer reach `max_sum`.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


if __name__ == "__main__":
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
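
# For the inputs above, the subsets of [3, 34, 4, 12, 5, 2] that sum to 9 are
# [3, 4, 2] and [4, 5] (elements keep the order of the original list), so the
# script prints: [3, 4, 2] [4, 5]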
| 612 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 162 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings, returning both
    its length and the subsequence itself.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0

            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Backtrack through the table to recover the subsequence itself.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
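
# Added check (hedged sketch, not part of the original file): the CLRS
# textbook example used above should give length 4 with subsequence "GTAB".
def _demo_lcs() -> None:
    ln, subseq = longest_common_subsequence("AGGTAB", "GXTXAYB")
    assert (ln, subseq) == (4, "GTAB")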
| 162 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
def __init__( self : str , _A : List[str] , _A : Optional[int] , _A : Dict = 3 , _A : List[str] = 1 , _A : Optional[int] = 1 , _A : str = "relu" , ):
super().__init__()
_UpperCamelCase = nn.Convad(
A__ , A__ , kernel_size=A__ , stride=A__ , padding=kernel_size // 2 , groups=A__ , bias=A__ , )
_UpperCamelCase = nn.BatchNormad(A__ )
_UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : Dict , _A : List[str] ):
_UpperCamelCase = self.convolution(A__ )
_UpperCamelCase = self.normalization(A__ )
_UpperCamelCase = self.activation(A__ )
return hidden_state
class RegNetEmbeddings(nn.Module):
def __init__( self : Any , _A : List[Any] ):
super().__init__()
_UpperCamelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_UpperCamelCase = config.num_channels
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCamelCase = self.embedder(A__ )
return hidden_state
class RegNetShortCut(nn.Module):
def __init__( self : Union[str, Any] , _A : int , _A : Any , _A : Optional[Any] = 2 ):
super().__init__()
_UpperCamelCase = nn.Convad(A__ , A__ , kernel_size=1 , stride=A__ , bias=A__ )
_UpperCamelCase = nn.BatchNormad(A__ )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[Any] ):
_UpperCamelCase = self.convolution(A__ )
_UpperCamelCase = self.normalization(A__ )
return hidden_state
class RegNetSELayer(nn.Module):
def __init__( self : Optional[int] , _A : Any , _A : Optional[Any] ):
super().__init__()
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
_UpperCamelCase = nn.Sequential(
nn.Convad(A__ , A__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(A__ , A__ , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : List[Any] , _A : Optional[int] ):
_UpperCamelCase = self.pooler(A__ )
_UpperCamelCase = self.attention(A__ )
_UpperCamelCase = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
def __init__( self : Optional[int] , _A : List[str] , _A : int , _A : Dict , _A : List[str] = 1 ):
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(A__ , A__ , stride=A__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , stride=A__ , groups=A__ , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=A__ ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(A__ )
_UpperCamelCase = self.shortcut(A__ )
hidden_state += residual
_UpperCamelCase = self.activation(A__ )
return hidden_state
class RegNetYLayer(nn.Module):
def __init__( self : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] = 1 ):
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(A__ , A__ , stride=A__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , stride=A__ , groups=A__ , activation=config.hidden_act ) , RegNetSELayer(A__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=A__ ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : int , _A : List[Any] ):
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(A__ )
_UpperCamelCase = self.shortcut(A__ )
hidden_state += residual
_UpperCamelCase = self.activation(A__ )
return hidden_state
class RegNetStage(nn.Module):
def __init__( self : Optional[int] , _A : int , _A : Dict , _A : Optional[Any] , _A : List[str] = 2 , _A : Dict = 2 , ):
super().__init__()
_UpperCamelCase = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
A__ , A__ , A__ , stride=A__ , ) , *[layer(A__ , A__ , A__ ) for _ in range(depth - 1 )] , )
def UpperCamelCase_ ( self : Any , _A : Optional[int] ):
_UpperCamelCase = self.layers(A__ )
return hidden_state
class RegNetEncoder(nn.Module):
def __init__( self : int , _A : Union[str, Any] ):
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
A__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A__ , config.depths[1:] ):
self.stages.append(RegNetStage(A__ , A__ , A__ , depth=A__ ) )
def UpperCamelCase_ ( self : Dict , _A : List[Any] , _A : Dict = False , _A : Any = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(A__ )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A__ , hidden_states=A__ )
class RegNetPreTrainedModel(PreTrainedModel):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = True
def UpperCamelCase_ ( self : str , _A : Optional[Any] ):
if isinstance(A__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(A__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase_ ( self : Optional[int] , _A : Dict , _A : str=False ):
if isinstance(A__ , A__ ):
_UpperCamelCase = value
_lowerCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", snake_case__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
def __init__( self : List[str] , _A : Any ):
super().__init__(A__ )
_UpperCamelCase = config
_UpperCamelCase = RegNetEmbeddings(A__ )
_UpperCamelCase = RegNetEncoder(A__ )
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Optional[Any] , _A : Any , _A : Tuple = None , _A : Optional[int] = None ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(A__ )
_UpperCamelCase = self.encoder(
A__ , output_hidden_states=A__ , return_dict=A__ )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(A__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A__ , pooler_output=A__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", snake_case__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__( self : Optional[Any] , _A : Any ):
super().__init__(A__ )
_UpperCamelCase = config.num_labels
_UpperCamelCase = RegNetModel(A__ )
# classification head
_UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : int , _A : Tuple = None , _A : List[Any] = None , _A : Union[str, Any] = None , _A : int = None , ):
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(A__ , output_hidden_states=A__ , return_dict=A__ )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier(A__ )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = "single_label_classification"
else:
_UpperCamelCase = "multi_label_classification"
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(A__ , A__ )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(A__ , A__ )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A__ , logits=A__ , hidden_states=outputs.hidden_states )
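# A usage sketch for the classification model defined above, using the public
# transformers API and the checkpoint named in the docstring constants;
# "cat.png" is a placeholder path.
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])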
| 10 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 137 | 0 |
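A hypothetical command-line invocation of the converter above; all paths are placeholders.

python convert_rembert_tf_checkpoint_to_pytorch.py \
    --tf_checkpoint_path /tmp/rembert/model.ckpt \
    --rembert_config_file /tmp/rembert/config.json \
    --pytorch_dump_path /tmp/rembert/pytorch_model.bin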
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = "\""
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_0_0_0_0
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
raise | 706 |
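The builder above is what backs the public CSV loader; a minimal usage sketch (file names are placeholders).

from datasets import load_dataset

dataset = load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"}, sep=",")
print(dataset["train"].features)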
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""")
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location", )
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
print(F"""accelerate configuration saved at {config_file}""" ) | 370 | 0 |
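The same helper is exposed publicly by the library; a minimal programmatic sketch:

from accelerate.utils import write_basic_config  # public re-export of the helper above

write_basic_config(mixed_precision="fp16")  # writes the default JSON config if none exists yet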
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase_ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =4
lowercase : Tuple =3
lowercase : List[str] =(32, 32)
lowercase : Dict =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Any =torch.tensor([10] ).to(UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Dict ={
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowercase : int =self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =4
lowercase : Any =4
lowercase : Any =(32, 32)
lowercase : List[Any] =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor([10] ).to(UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (4, 32, 32)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ={
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowercase : str =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase : Dict =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase , lowercase : List[Any] =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
lowercase , lowercase : Tuple =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ )
model_accelerate.to(UpperCAmelCase__ )
model_accelerate.eval()
lowercase : int =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : Tuple =noise.to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ )
lowercase : Dict =model_accelerate(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowercase , lowercase : Optional[Any] =UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=UpperCAmelCase__ , low_cpu_mem_usage=UpperCAmelCase__ )
model_normal_load.to(UpperCAmelCase__ )
model_normal_load.eval()
lowercase : List[str] =model_normal_load(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(UpperCAmelCase__ )
lowercase : Dict =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : List[Any] =noise.to(UpperCAmelCase__ )
lowercase : Dict =torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : Optional[int] =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : List[str] =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase : Tuple =torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 ) )
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
lowerCamelCase_ = UNetaDModel
lowerCamelCase_ = 'sample'
@property
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=(32, 32) ):
'''simple docstring'''
lowercase : Optional[int] =4
lowercase : Dict =3
lowercase : Optional[Any] =floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Dict =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Dict ={
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowercase : Union[str, Any] =self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : Dict =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(UpperCAmelCase__ )
lowercase : int =self.dummy_input
lowercase : Tuple =floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase__ )
lowercase : Optional[int] =noise
lowercase : List[str] =model(**UpperCAmelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : str =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(UpperCAmelCase__ )
lowercase : Union[str, Any] =4
lowercase : Tuple =3
lowercase : int =(256, 256)
lowercase : Optional[int] =torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : Dict =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : Optional[Any] =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase : int =torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(UpperCAmelCase__ )
lowercase : List[Any] =4
lowercase : List[str] =3
lowercase : Optional[Any] =(32, 32)
lowercase : Dict =torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.tensor(batch_size * [1E-4] ).to(UpperCAmelCase__ )
with torch.no_grad():
lowercase : int =model(UpperCAmelCase__ , UpperCAmelCase__ ).sample
lowercase : List[Any] =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase : str =torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# not required for this model
pass
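# The tests above exercise the obfuscated `UNetaDModel`, i.e. diffusers'
# UNet2DModel. A standalone smoke test mirroring the dummy-input pattern:
import torch
from diffusers import UNet2DModel

unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(1, 3, 32, 32)
output = unet(sample, timestep=10).sample
assert output.shape == sample.shape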
| 92 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(F"{cocktail_shaker_sort(unsorted) = }")
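# Illustrative check of the fixed sort above (runs without user input):
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]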
| 282 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 703 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""")
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 161 | 0 |
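The first test relies on the block-determinant identity for the Schur complement; in the notation of the code above:

X = \begin{pmatrix} A & B \\ B^{\mathsf{T}} & C \end{pmatrix},
\qquad S = C - B^{\mathsf{T}} A^{-1} B,
\qquad \det(X) = \det(A)\,\det(S).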
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 102 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
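# A minimal sketch of the lazy-module pattern used above (illustrative, not the
# transformers implementation): attributes import their submodule on first
# access, so `import` stays cheap and optional backends are only touched when
# actually needed.
import importlib
import types


class SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")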
| 640 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def lowerCAmelCase( self : int ):
"""simple docstring"""
MarianMTModel.from_pretrained(UpperCAmelCase__ )
@slow
@require_torch_gpu
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Union[str, Any] = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
snake_case : List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
snake_case : Dict = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
snake_case : Tuple = bash_script.replace(UpperCAmelCase__ , str(UpperCAmelCase__ ) )
snake_case : Tuple = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
snake_case : Union[str, Any] = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
snake_case : str = ['''finetune.py'''] + bash_script.split() + args
with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__ ):
snake_case : str = argparse.ArgumentParser()
snake_case : List[str] = pl.Trainer.add_argparse_args(UpperCAmelCase__ )
snake_case : Dict = SummarizationModule.add_model_specific_args(UpperCAmelCase__ , os.getcwd() )
snake_case : List[str] = parser.parse_args()
snake_case : str = main(UpperCAmelCase__ )
# Check metrics
snake_case : Tuple = load_json(model.metrics_save_path )
snake_case : Optional[int] = metrics['''val'''][0]
snake_case : List[str] = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , UpperCAmelCase__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case : Dict = os.listdir(UpperCAmelCase__ )
snake_case : Any = [x for x in contents if x.endswith('''.ckpt''' )][0]
snake_case : List[str] = os.path.join(args.output_dir , UpperCAmelCase__ )
snake_case : str = torch.load(UpperCAmelCase__ , map_location='''cpu''' )
snake_case : Optional[int] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case : int = {os.path.basename(UpperCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : Tuple = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
snake_case : Tuple = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
snake_case : str = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
snake_case : Tuple = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
snake_case : Dict = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
snake_case : Optional[int] = bash_script.replace(UpperCAmelCase__ , str(UpperCAmelCase__ ) )
snake_case : Any = self.get_auto_remove_tmp_dir()
snake_case : Optional[Any] = bash_script.replace('''--fp16''' , '''''' )
snake_case : str = 6
snake_case : Dict = (
['''distillation.py''']
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
F"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__ ):
snake_case : int = argparse.ArgumentParser()
snake_case : Optional[int] = pl.Trainer.add_argparse_args(UpperCAmelCase__ )
snake_case : List[Any] = SummarizationDistiller.add_model_specific_args(UpperCAmelCase__ , os.getcwd() )
snake_case : int = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
snake_case : Any = distill_main(UpperCAmelCase__ )
# Check metrics
snake_case : Optional[Any] = load_json(model.metrics_save_path )
snake_case : Any = metrics['''val'''][0]
snake_case : int = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , UpperCAmelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case : List[str] = os.listdir(UpperCAmelCase__ )
snake_case : int = [x for x in contents if x.endswith('''.ckpt''' )][0]
snake_case : str = os.path.join(args.output_dir , UpperCAmelCase__ )
snake_case : Any = torch.load(UpperCAmelCase__ , map_location='''cpu''' )
snake_case : Any = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case : List[Any] = {os.path.basename(UpperCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
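# The sys.argv patching idiom both tests above rely on, in isolation
# (argument values are illustrative):
import argparse
import sys
from unittest.mock import patch

parser = argparse.ArgumentParser()
parser.add_argument("--output_dir")
with patch.object(sys, "argv", ["finetune.py", "--output_dir", "/tmp/out"]):
    args = parser.parse_args()  # argparse reads the patched sys.argv
assert args.output_dir == "/tmp/out"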
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
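# Quick worked check of the rounding helper above: with the default
# scale_factor=8 (so a divisor of 64), 768 maps to 96 exactly, while
# 500 is not a multiple of 64 and rounds up to 64.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(500, 500) == (64, 64)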
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from
        Accelerate's module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        r"""Function invoked when calling the pipeline for generation."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
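# Illustrative example (added; not part of the original pipeline). A minimal,
# self-contained sketch of the classifier-free-guidance mixing step used in the
# denoising loop above; the tensor shape and guidance scale are assumptions.
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 64, 64)  # batched prediction: [unconditional, conditional]
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)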
| 84 | 1 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: bubble the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: bubble the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break

    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
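# Illustrative example (added; not part of the original file). Property check:
# the result must always agree with the built-in sorted() on random inputs.
import random as _random

data = [_random.randint(-100, 100) for _ in range(50)]
assert cocktail_shaker_sort(list(data)) == sorted(data)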
| 12 |
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
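# Illustrative example (added; not part of the original file; the function name
# above is restored from the algorithm's behavior). Worked example: for
# [1, 2, 3, 4] the mean is 2.5, the absolute deviations are 1.5, 0.5, 0.5, 1.5,
# and their mean is 1.0.
assert average_absolute_deviation([1, 2, 3, 4]) == 1.0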
| 12 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModel.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModel.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForPreTraining.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForPreTraining.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(__a , from_pt=__a)
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__a , from_tf=__a)
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(__a , from_pt=__a)
_UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(__a , from_tf=__a)
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForQuestionAnswering.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
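# Illustrative example (added; not part of the original tests). The cross-loading
# pattern these tests exercise: instantiate a TF model from PyTorch weights and
# vice versa. "bert-base-uncased" hosts both weight formats; any such checkpoint works.
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)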
| 707 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
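# Illustrative example (added; not part of the original tests). The seeding
# pattern from get_dummy_inputs above: MPS does not accept device-bound
# torch.Generator objects, so the tests fall back to the global RNG there.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)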
| 639 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of text or text pairs. Unlike the regular __call__, every candidate is
        padded to `max_length` so the encodings of all candidates can be stacked into one batch.
        """
        # Always use a fixed sequence length to encode in order to stack candidates into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
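# Illustrative example (added; not part of the original file), mirroring the
# upstream docstring for batch_encode_candidates: every question carries a fixed
# number of candidate texts, each padded to max_length so the batch stacks cleanly.
tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="np")
# batch["input_ids"].shape == (num_questions, num_candidates, max_length)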
| 37 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
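# Illustrative example (added; not part of the original module). What the
# _LazyModule registration above buys: importing the package is cheap, and the
# heavy torch/TF submodules load only when an attribute is first accessed.
from transformers.models import xlnet

config_cls = xlnet.XLNetConfig  # first attribute access triggers the real import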
| 637 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    r"""
    Construct a PEGASUS tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def A_ ( self : Dict ):
return len(self.sp_model ) + self.offset
def A_ ( self : Dict ):
snake_case_ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Optional[int] , lowercase_ : Any ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
def A_ ( self : Optional[Any] , lowercase_ : str ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
snake_case_ = self.sp_model.piece_to_id(lowercase_ )
return sp_id + self.offset
def A_ ( self : Dict , lowercase_ : int ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
snake_case_ = self.sp_model.IdToPiece(index - self.offset )
return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def A_ ( self : Optional[int] , lowercase_ : Optional[int]=False ):
return 1
def A_ ( self : List[Any] , lowercase_ : Dict ):
snake_case_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def A_ ( self : Tuple , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A_ ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A_ ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
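# Illustrative example (added; not part of the original file). The id layout the
# tokenizer maintains: ids 0..offset-1 are reserved (pad/eos/mask/<unk_x>) and
# every SentencePiece id is shifted up by `offset`; the values here are assumptions.
sp_id = 7  # id inside the underlying SentencePiece model
offset = 103  # default reserved-token budget from __init__
tokenizer_id = sp_id + offset  # what _convert_token_to_id returns for a plain piece
assert tokenizer_id - offset == sp_id  # what _convert_id_to_token undoes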
| 593 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
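# Illustrative example (added; not part of the original file). A typical call
# site, assuming the running diffusers version is still below 1.0.0: pop the
# deprecated kwarg, emit a FutureWarning, and fall back to its value.
def resize(image, size=None, **kwargs):
    deprecated_size = deprecate("size_hw", "1.0.0", "Use `size` instead.", take_from=kwargs)
    size = size if size is not None else deprecated_size
    return size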
| 593 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
def __init__( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any]=14 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Tuple=99 , _lowerCAmelCase : int=32 , _lowerCAmelCase : str=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : Tuple="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : int=512 , _lowerCAmelCase : str=16 , _lowerCAmelCase : int=2 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : str=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : str=None , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_token_type_ids
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = use_mc_token_ids
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = self.vocab_size - 1
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
if self.use_mc_token_ids:
__lowercase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
__lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
def _a ( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , *_lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
def _a ( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _a ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = CTRLModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _a ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@slow
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Dict ) -> Any:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(_lowerCAmelCase )
__lowercase = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
__lowercase = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase )
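# Illustrative example (added; not part of the original tests). The integration
# test above in miniature: greedy generation from the "ctrl" checkpoint (a large
# download); the prompt comes from the test, the surrounding code is mine.
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))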
| 80 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__A = logging.get_logger(__name__)
def __a ( lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : Any ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int]=False ) -> List[str]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
UpperCAmelCase_= os.path.abspath(lowerCAmelCase_ )
logger.info(F"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location="""cpu""" )
logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase_= convert_pytorch_state_dict_to_flax(lowerCAmelCase_ ,lowerCAmelCase_ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase_= convert_pytorch_sharded_state_dict_to_flax(lowerCAmelCase_ ,lowerCAmelCase_ )
return flax_state_dict
def __a ( lowerCAmelCase_ : Tuple[str] ,lowerCAmelCase_ : np.ndarray ,lowerCAmelCase_ : Dict[str, jnp.ndarray] ,lowerCAmelCase_ : str ,) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(lowerCAmelCase_ : Tuple[str] ) -> bool:
return len(set(lowerCAmelCase_ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase_= pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase_= pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase_= pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase_= pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_= pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
UpperCAmelCase_= pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_= pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowerCAmelCase_ ):
UpperCAmelCase_= pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_= pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_= pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase_= None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase_= pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase_= pt_tuple_key[-2] + """_v"""
if name is not None:
UpperCAmelCase_= pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_= {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_= flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase_= flax_model.params["""params"""]
else:
UpperCAmelCase_= flax_model.params
UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_= flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(lowerCAmelCase_ )
UpperCAmelCase_= {}
UpperCAmelCase_= (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_= (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_= tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase_= pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_= pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_, UpperCAmelCase_= rename_key_and_reshape_tensor(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# add model prefix if necessary
UpperCAmelCase_= (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_= (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
return unflatten_dict(lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
import torch
# Load the index
UpperCAmelCase_= {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase_= torch.load(lowerCAmelCase_ )
UpperCAmelCase_= {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_= flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_= flax_model.params["""params"""]
UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
UpperCAmelCase_= flax_model.params
UpperCAmelCase_= flatten_dict(lowerCAmelCase_ )
UpperCAmelCase_= (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_= (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_= tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase_= pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_= pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_, UpperCAmelCase_= rename_key_and_reshape_tensor(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# add model prefix if necessary
UpperCAmelCase_= (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_= (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
continue
if "var" in flax_key[-1]:
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_= jnp.asarray(lowerCAmelCase_ )
return unflatten_dict(lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_= os.path.abspath(lowerCAmelCase_ )
logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase_= getattr(lowerCAmelCase_ ,"""Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(lowerCAmelCase_ ,"""rb""" ) as state_f:
try:
UpperCAmelCase_= from_bytes(lowerCAmelCase_ ,state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(lowerCAmelCase_ ,lowerCAmelCase_ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
UpperCAmelCase_= (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase_= (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase_= []
UpperCAmelCase_= set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase_= flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase_= """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_= flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_= (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase_ ) not in pt_model_dict:
# conv layer
UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase_= jnp.transpose(lowerCAmelCase_ ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase_ ) not in pt_model_dict:
# linear layer
UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase_= flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase_= flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase_= flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase_= flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
UpperCAmelCase_= """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase_= """.""".join(lowerCAmelCase_ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase_= {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase_= key.split(""".""" )
UpperCAmelCase_= None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase_= key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase_= key_components[-2] + """_v"""
if name is not None:
UpperCAmelCase_= key_components[:-3] + [name]
UpperCAmelCase_= """.""".join(lowerCAmelCase_ )
UpperCAmelCase_= key
if flax_key in special_pt_names:
UpperCAmelCase_= special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase_= np.asarray(lowerCAmelCase_ ) if not isinstance(lowerCAmelCase_ ,np.ndarray ) else flax_tensor
UpperCAmelCase_= torch.from_numpy(lowerCAmelCase_ )
# remove from missing keys
missing_keys.remove(lowerCAmelCase_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase_ )
pt_model.load_state_dict(lowerCAmelCase_ )
# re-transform missing_keys to list
UpperCAmelCase_= list(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(lowerCAmelCase_ ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
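# Illustrative example (added; not part of the original file). The core
# rename/reshape rule applied above for dense layers: PyTorch stores
# nn.Linear weights as (out_features, in_features), Flax stores the kernel
# as (in_features, out_features), so conversion is a transpose.
import numpy as np

pt_weight = np.random.randn(8, 4)  # like torch.nn.Linear(4, 8).weight
flax_kernel = pt_weight.T  # the "kernel" entry in the Flax params tree
assert flax_kernel.shape == (4, 8)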
| 593 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case ( PretrainedConfig ):
UpperCAmelCase__ = """realm"""
    def __init__(self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
"""simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
# Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size | 701 |
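# A minimal usage sketch for the config above, assuming it mirrors transformers'
# RealmConfig (attribute names follow the assignments in __init__):
config = snake_case()
print(config.vocab_size, config.hidden_size, config.num_candidates)  # 30522 768 8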
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 0 |
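# Both tests above depend on re-seeding the generator so that two pipeline runs
# are numerically comparable. A generic sketch of that pattern (`pipe` stands in
# for a loaded pipeline; it is not part of the original tests):
import numpy as np
import torch
generator = torch.manual_seed(0)
first = pipe.text_to_image(prompt="a photo", generator=generator, num_inference_steps=2, output_type="numpy").images
generator = torch.manual_seed(0)  # identical seed -> identical denoising trajectory
second = pipe.text_to_image(prompt="a photo", generator=generator, num_inference_steps=2, output_type="numpy").images
assert np.abs(first - second).max() < 1e-5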
def __UpperCamelCase ( number ):
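    """
    Count the set bits (1s) of a non-negative integer with Brian Kernighan's
    algorithm: `n & (n - 1)` clears the lowest set bit, once per iteration.
    >>> __UpperCamelCase(25)
    3
    >>> __UpperCamelCase(36)
    2
    >>> __UpperCamelCase(0)
    0
    """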
    if not isinstance(number , int ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
        # Clearing the lowest set bit each pass jumps straight to the next 1,
        # so the loop runs once per `1` bit rather than once per bit position
        # (i.e. not 32 times).
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 431 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=20, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
lowerCAmelCase_ = prepare_mbart_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self, config, inputs_dict ):
"""simple docstring"""
lowerCAmelCase_ = TFMBartModel(config=UpperCamelCase__ ).get_decoder()
lowerCAmelCase_ = inputs_dict['''input_ids''']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ = inputs_dict['''head_mask''']
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
lowerCAmelCase_ = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp( self ):
"""simple docstring"""
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MBartConfig )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
]
    expected_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
    model_name = 'facebook/mbart-large-en-ro'
@cached_property
    def tokenizer( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model( self ):
"""simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
    def _assert_generated_batch_equal_expected( self, **UpperCamelCase__ ):
"""simple docstring"""
        generated_words = self.translate_src_text(**UpperCamelCase__ )
        self.assertListEqual(self.expected_text, generated_words )
    def translate_src_text( self, **UpperCamelCase__ ):
"""simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **UpperCamelCase__, return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True )
return generated_words
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
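# The prepare_mbart_inputs_dict helper above derives attention masks straight
# from the pad token id. A tiny standalone sketch of that masking rule (values
# are illustrative):
import tensorflow as tf
pad_token_id = 1
input_ids = tf.constant([[5, 6, 7, pad_token_id, pad_token_id]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]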
| 431 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self : Tuple , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : List[str]):
"""simple docstring"""
super().__init__(features=UpperCamelCase_)
__UpperCAmelCase : Tuple = torch_tensor_kwargs
import torch # noqa import torch at initialization
    def _consolidate( self , column ):
"""simple docstring"""
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(UpperCamelCase_)
return column
    def _tensorize( self , value ):
"""simple docstring"""
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_))):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__UpperCAmelCase : Union[str, Any] = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
__UpperCAmelCase : List[str] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
__UpperCAmelCase : Optional[Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image):
__UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_)
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize( self , data_struct ):
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__") and not isinstance(UpperCamelCase_ , torch.Tensor):
__UpperCAmelCase : List[str] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_) for substruct in data_struct])
elif isinstance(UpperCamelCase_ , (list, tuple)):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_) for substruct in data_struct])
return self._tensorize(UpperCamelCase_)
    def recursive_tensorize( self , data_struct : dict ):
"""simple docstring"""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False)
    def format_row( self , pa_table : pa.Table ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.numpy_arrow_extractor().extract_row(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_)
return self.recursive_tensorize(UpperCamelCase_)
    def format_column( self , pa_table : pa.Table ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_)
__UpperCAmelCase : str = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0])
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_)
__UpperCAmelCase : Tuple = self._consolidate(UpperCamelCase_)
return column
    def format_batch( self , pa_table : pa.Table ):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_)
__UpperCAmelCase : List[Any] = self.python_features_decoder.decode_batch(UpperCamelCase_)
__UpperCAmelCase : Any = self.recursive_tensorize(UpperCamelCase_)
for column_name in batch:
__UpperCAmelCase : List[Any] = self._consolidate(batch[column_name])
return batch
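# This formatter backs torch-formatted output in the datasets library. A hedged
# usage sketch (assumes a recent datasets install; requests go through the class above):
from datasets import Dataset
ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
batch = ds[:2]
print(type(batch["x"]), batch["x"].dtype)  # <class 'torch.Tensor'> torch.float32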
| 715 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class a__ ( AbstractDatasetReader ):
    def __init__( self , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
__UpperCAmelCase : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
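# A minimal usage sketch, assuming a local SparkSession and a datasets version
# that exposes Dataset.from_spark (which is backed by this reader):
from datasets import Dataset
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
ds = Dataset.from_spark(df)
print(ds.num_rows)  # 2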
| 487 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ):
    """Expects an image tensor in [0, 1]; returns a bit tensor in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ):
    """Expects bits in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation below.
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
SCREAMING_SNAKE_CASE__ : List[str] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
SCREAMING_SNAKE_CASE__ : Tuple = self.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
SCREAMING_SNAKE_CASE__ : List[Any] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
SCREAMING_SNAKE_CASE__ : str = self.bit_scale
if self.config.clip_sample:
        SCREAMING_SNAKE_CASE__ : Dict = torch.clamp(pred_original_sample , -scale , scale )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    SCREAMING_SNAKE_CASE__ : Dict = self._get_variance(timestep , prev_timestep )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
SCREAMING_SNAKE_CASE__ : List[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : Optional[int] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        SCREAMING_SNAKE_CASE__ : Tuple = model_output.device if torch.is_tensor(model_output ) else """cpu"""
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        SCREAMING_SNAKE_CASE__ : List[str] = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def _lowercase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="epsilon" , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
SCREAMING_SNAKE_CASE__ : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = torch.split(model_output , sample.shape[1] , dim=1 )
else:
SCREAMING_SNAKE_CASE__ : Any = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE__ : List[Any] = self.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
SCREAMING_SNAKE_CASE__ : List[str] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__ : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
SCREAMING_SNAKE_CASE__ : Tuple = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
SCREAMING_SNAKE_CASE__ : List[str] = self.bit_scale
if self.config.clip_sample:
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.clamp(pred_original_sample , -scale , scale )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE__ : Dict = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
if t > 0:
SCREAMING_SNAKE_CASE__ : int = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class __a (DiffusionPipeline):
'''simple docstring'''
    def __init__( self , unet , scheduler , bit_scale = 1.0 , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Any = bit_scale
SCREAMING_SNAKE_CASE__ : int = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
)
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , height = 256 , width = 256 , num_inference_steps = 50 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=_a , )
SCREAMING_SNAKE_CASE__ : Tuple = decimal_to_bits(_a ) * self.bit_scale
SCREAMING_SNAKE_CASE__ : List[str] = latents.to(self.device )
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Dict = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler.step(_a , _a , _a ).prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = bits_to_decimal(_a )
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Optional[int] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
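# Round-trip sanity check for the two bit codecs defined at the top of this
# file: after 8-bit quantization, encoding then decoding recovers the image.
import torch
x = torch.rand(1, 3, 4, 4)                      # image in [0, 1]
quantized = (x * 255).int().float() / 255       # what decimal_to_bits keeps
restored = bits_to_decimal(decimal_to_bits(x))  # 24 bit-channels in between
assert torch.allclose(quantized, restored)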
| 680 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=0 , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = seq_length
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE__ : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = type_vocab_size
SCREAMING_SNAKE_CASE__ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Any = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : Any = scope
SCREAMING_SNAKE_CASE__ : int = projection_dim
    def prepare_config_and_inputs( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Any = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
SCREAMING_SNAKE_CASE__ : str = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRContextEncoder(config=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , attention_mask=_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : str = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFDPRQuestionEncoder(config=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : List[str] = model(_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFDPRReader(config=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , attention_mask=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __a (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE :int = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Dict = False
    def setUp( self ):
"""simple docstring"""
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_a )
    def test_dpr_question_encoder_model( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_a )
    def test_dpr_reader_model( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_a )
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFDPRQuestionEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRReader.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_tf
class __a (unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_no_head( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
SCREAMING_SNAKE_CASE__ : Tuple = model(_a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Any = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
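# A hedged usage sketch of the same question encoder outside the test harness
# (requires network access to download the checkpoint):
from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder
tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
print(model(**inputs).pooler_output.shape)  # (1, 768)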
| 680 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **__snake_case ):
UpperCAmelCase : List[Any] = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__snake_case )
return config
def A ( self : Dict ) -> Optional[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def A ( self : Dict ) -> str:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__snake_case )
def A ( self : str ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__snake_case )
def A ( self : List[str] ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__snake_case )
def A ( self : Dict ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__snake_case )
def A ( self : List[Any] ) -> Dict:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__snake_case , prev_timestep=__snake_case )
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : Optional[int] = self.get_scheduler_config(variance_type='''fixed_small_log''' )
UpperCAmelCase : Dict = scheduler_class(**__snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : Tuple = self.scheduler_classes[0]
UpperCAmelCase : Any = self.get_scheduler_config(variance_type='''learned_range''' )
UpperCAmelCase : int = scheduler_class(**__snake_case )
UpperCAmelCase : Optional[int] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__snake_case ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__snake_case ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__snake_case ) - -0.0_01_00_11 < 1E-5
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : List[str] = self.scheduler_classes[0]
UpperCAmelCase : str = self.get_scheduler_config()
UpperCAmelCase : List[str] = scheduler_class(**__snake_case )
UpperCAmelCase : Tuple = scheduler.timesteps
UpperCAmelCase : List[str] = self.dummy_model()
UpperCAmelCase : List[Any] = self.dummy_sample_deter
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
for i, t in enumerate(__snake_case ):
# 1. predict noise residual
UpperCAmelCase : List[str] = model(__snake_case , __snake_case )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : Dict = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample
UpperCAmelCase : Tuple = pred_prev_sample
UpperCAmelCase : Dict = torch.sum(torch.abs(__snake_case ) )
UpperCAmelCase : str = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase : Optional[int] = scheduler_class(**__snake_case )
scheduler.set_timesteps(25 )
UpperCAmelCase : int = scheduler.timesteps
UpperCAmelCase : Union[str, Any] = self.dummy_model()
UpperCAmelCase : int = self.dummy_sample_deter
UpperCAmelCase : str = torch.manual_seed(0 )
for i, t in enumerate(__snake_case ):
# 1. predict noise residual
UpperCAmelCase : Union[str, Any] = model(__snake_case , __snake_case )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase : List[Any] = None
else:
UpperCAmelCase : Optional[int] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : List[str] = scheduler.step(
__snake_case , __snake_case , __snake_case , prev_timestep=__snake_case , generator=__snake_case ).prev_sample
UpperCAmelCase : Optional[int] = pred_prev_sample
UpperCAmelCase : Dict = torch.sum(torch.abs(__snake_case ) )
UpperCAmelCase : List[str] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A ( self : Dict ) -> Union[str, Any]:
pass
def A ( self : List[str] ) -> Optional[int]:
pass
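# Both loop tests above follow the standard scheduler-driven denoising pattern;
# a generic sketch, with `model` and `scheduler` standing in for the dummy model
# and the configured scheduler used by the tests:
import torch
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = model(sample, t)  # 1. predict the noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 2. x_t -> x_t-1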
| 706 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCamelCase__: List[str] = logging.getLogger(__name__)
class BertEncoderWithPabee( BertEncoder ):
"""simple docstring"""
    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , A__ , )
class BertModelWithPabee( BertModel ):
"""simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.encoder = BertEncoderWithPabee(config )
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold( self , threshold ):
        self.regression_threshold = threshold
    def set_patience( self , patience ):
        self.patience = patience
    def reset_stats( self ):
        self.inference_layers_num = 0
        self.inference_instances_num = 0
    def log_stats( self ):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
UpperCAmelCase : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase : List[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
UpperCAmelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase : Optional[Any] = torch.ones(__snake_case , device=__snake_case )
if token_type_ids is None:
UpperCAmelCase : Tuple = torch.zeros(__snake_case , dtype=torch.long , device=__snake_case )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(__snake_case , __snake_case , __snake_case )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = encoder_hidden_states.size()
UpperCAmelCase : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase : str = torch.ones(__snake_case , device=__snake_case )
UpperCAmelCase : Tuple = self.invert_attention_mask(__snake_case )
else:
UpperCAmelCase : List[str] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase : List[Any] = self.get_head_mask(__snake_case , self.config.num_hidden_layers )
UpperCAmelCase : Any = self.embeddings(
input_ids=__snake_case , position_ids=__snake_case , token_type_ids=__snake_case , inputs_embeds=__snake_case )
UpperCAmelCase : Optional[int] = embedding_output
if self.training:
UpperCAmelCase : str = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase : Optional[int] = self.encoder.adaptive_forward(
__snake_case , current_layer=__snake_case , attention_mask=__snake_case , head_mask=__snake_case )
UpperCAmelCase : List[str] = self.pooler(__snake_case )
UpperCAmelCase : Dict = output_layers[i](output_dropout(__snake_case ) )
res.append(__snake_case )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase : Any = self.encoder(
__snake_case , attention_mask=__snake_case , head_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
UpperCAmelCase : Dict = self.pooler(encoder_outputs[0] )
UpperCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](__snake_case )]
else:
UpperCAmelCase : Tuple = 0
UpperCAmelCase : List[str] = None
UpperCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase : Optional[Any] = self.encoder.adaptive_forward(
__snake_case , current_layer=__snake_case , attention_mask=__snake_case , head_mask=__snake_case )
UpperCAmelCase : List[str] = self.pooler(__snake_case )
UpperCAmelCase : Optional[int] = output_layers[i](__snake_case )
if regression:
UpperCAmelCase : Union[str, Any] = logits.detach()
if patient_result is not None:
UpperCAmelCase : List[str] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase : str = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__snake_case ) ):
patient_counter += 1
else:
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[str] = logits
if patient_counter == self.patience:
break
UpperCAmelCase : List[Any] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """ , BERT_START_DOCSTRING , )
class SCREAMING_SNAKE_CASE( BertPreTrainedModel ):
"""simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
UpperCAmelCase : Optional[int] = self.bert(
input_ids=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCAmelCase : List[str] = (logits[-1],)
if labels is not None:
UpperCAmelCase : Tuple = None
UpperCAmelCase : Any = 0
for ix, logits_item in enumerate(__snake_case ):
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase : Optional[Any] = MSELoss()
UpperCAmelCase : Optional[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase : Dict = CrossEntropyLoss()
UpperCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCAmelCase : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCAmelCase : Any = (total_loss / total_weights,) + outputs
return outputs
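# At inference time PABEE is driven by the setters defined on the encoder model
# above. A hedged sketch of the intended flow (`model` is an instance of the
# classification class above, `inputs` a tokenized batch):
model.bert.set_patience(3)                 # exit once 3 consecutive layers agree
model.bert.set_regression_threshold(0.1)   # only consulted when num_labels == 1
model.bert.reset_stats()
logits = model(**inputs)[0]
model.bert.log_stats()                     # prints average layers used / speed-up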
| 528 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = 3_84
SCREAMING_SNAKE_CASE__ : Optional[Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 96
SCREAMING_SNAKE_CASE__ : Optional[int] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[str] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : List[Any] = 96
SCREAMING_SNAKE_CASE__ : Optional[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Any = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = 1_28
SCREAMING_SNAKE_CASE__ : Optional[int] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : int = 12
SCREAMING_SNAKE_CASE__ : Tuple = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : List[str] = 1_92
SCREAMING_SNAKE_CASE__ : Optional[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Dict = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[str] = 12
SCREAMING_SNAKE_CASE__ : Tuple = 7_68
# set label information
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_50
SCREAMING_SNAKE_CASE__ : Optional[int] = "huggingface/label-files"
SCREAMING_SNAKE_CASE__ : Optional[int] = "ade20k-id2label.json"
SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE__ : List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = SwinConfig(
embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
SCREAMING_SNAKE_CASE__ : Any = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , )
return config
def create_rename_keys( config ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-dim :]
# fmt: on
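# Illustration of the slicing above (sizes are hypothetical): for dim = 96 the fused
# qkv projection has weight shape (288, 96) and bias shape (288,); rows [0:96] become
# the query, rows [96:192] the key, and rows [192:288] (i.e. [-96:]) the value.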
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
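# The "correct"/"reverse" pairs above are inverses of each other; a quick sanity check
# (illustrative, not part of the conversion):
#   w = torch.randn(8, 16)
#   assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)
# The permutation is needed because the original Swin implementation unfolds the four
# patch-merging positions in a different order than the Transformers port.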
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" by "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
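# Example invocation (script filename and output path are illustrative):
#   python convert_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny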
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
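# Sketch of the intended behavior: with the lazy module installed in sys.modules,
# importing this package stays cheap, and the sentencepiece-backed tokenizer module
# is only imported when `BartphoTokenizer` is first accessed.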
""" PyTorch UperNet model. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation."""

from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Convolution + batch norm + ReLU block used throughout the UperNet heads."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
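# Shape sanity check (illustrative):
#   block = UperNetConvModule(64, 128, kernel_size=3, padding=1)
#   block(torch.randn(1, 64, 32, 32)).shape  # -> torch.Size([1, 128, 32, 32])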
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) as used in PSPNet."""

    def __init__(self, pool_scales, in_channels, channels, align_corners) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
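# For pool_scales=(1, 2, 3, 6) the module pools the input to 1x1, 2x2, 3x3 and 6x6,
# projects each to `channels` and upsamples back, so the outputs can be concatenated
# with the input along the channel axis. Illustrative shapes:
#   ppm = UperNetPyramidPoolingModule((1, 2, 3, 6), in_channels=512, channels=128, align_corners=False)
#   [o.shape for o in ppm(torch.randn(1, 512, 16, 16))]  # 4 x torch.Size([1, 128, 16, 16])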
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
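# Overall data flow of the head: the deepest feature map goes through the PPM and
# `bottleneck`, the shallower levels through 1x1 lateral convs; features are merged
# top-down, refined by 3x3 `fpn_convs`, upsampled to the finest resolution,
# concatenated, fused by `fpn_bottleneck` and mapped to `num_labels` channels.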
class UperNetFCNHead(nn.Module):
    """Fully Convolutional Network head, used here as the auxiliary head of UperNet."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
lowerCamelCase_ : Any = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowerCamelCase_ : List[str] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
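# Example usage (checkpoint name assumed; see https://huggingface.co/models?filter=upernet):
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)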
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; `top` points at the most recently pushed node."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
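# Example (illustrative):
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   print(stack)   # 2->1
#   stack.pop()    # returns 2
#   stack.peek()   # returns 1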