def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple, via the identity lcm(x, y) * gcd(x, y) == x * y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any model with a language modeling head. This pipeline predicts the words
    that will follow a specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Complete the prompt(s) given as inputs."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(
            input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs
        )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
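
# Usage sketch (an editorial addition, not part of the original module; the
# checkpoint name is only an example and is downloaded on first use):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Hello, I am", max_new_tokens=20)[0]["generated_text"])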
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by the given level."""

    def brightness(c: int) -> float:
        """Transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brighter_img = change_brightness(img, 100)
        brighter_img.save("image_data/lena_brightness.png", format="png")
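        # Sanity check (an added illustration, not part of the original script):
        # a solid mid-gray pixel shifts by exactly `level`, since
        # brightness(128) evaluates to 128 + level.
        gray = Image.new("L", (1, 1), color=128)
        assert change_brightness(gray, 50).getpixel((0, 0)) == 178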
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
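
if __name__ == "__main__":
    # Minimal sketch (an editorial addition, not part of the metric file): the
    # class above is a thin wrapper over NLTK, so the same score can be
    # computed directly with gleu_score.corpus_gleu.
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis]))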
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
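
# Usage sketch (an editorial addition): concrete diffusers tests mix this
# class into a TestCase and point it at a block, along the lines of
#
#     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = DownBlock2D  # from diffusers.models
#         block_type = "down"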
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. Byte-level BPE operates on bytes, but merge
    files are text, so every byte is assigned a visible unicode character to stand in for it.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
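
# Illustration (an added note, not part of the original module): for the word
# ("h", "e", "l", "l", "o"), get_pairs returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; bpe() below repeatedly
# merges whichever of these pairs has the lowest merge rank.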
class BartTokenizer(PreTrainedTokenizer):
    """
    Constructs a BART tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
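
# Usage sketch (an editorial addition; the checkpoint name is only an example
# and is downloaded on first use):
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]
#     assert tokenizer.decode(ids, skip_special_tokens=True) == "Hello world"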
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map legacy negative flags (e.g. `no_inference`) onto their positive
        # counterparts before calling the dataclass constructor.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the old ProphetNet checkpoint weights into the current ProphetNet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old model stores q/k/v as a single in_proj tensor; slice it into thirds
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
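
# Invocation sketch (an editorial addition): the script is run from the
# command line, e.g.
#
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path <path_to_checkpoint> \
#         --pytorch_dump_folder_path <output_dir>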
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    """
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
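
# Usage sketch (an editorial addition; the checkpoint name is an example and
# downloads on first use):
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor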
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency list, adding each edge with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j with the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph, where every vertex is connected to every other vertex."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
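    # Quick sanity check (an added illustration, not part of the original
    # script): in a complete graph on 5 vertices every vertex has degree 4.
    assert all(len(edges) == 4 for edges in complete_graph(5).values())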
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
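
# Usage sketch (an editorial addition): configs are plain constructors, so
# AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# overrides just those fields and keeps the defaults above for the rest.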
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
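
# Note (an editorial addition): the tester above builds tiny configs and random
# inputs; the TestCase below mixes in TFModelTesterMixin and PipelineTesterMixin
# so the shared battery of common tests runs against those small models.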
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
self.asseretIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
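    # Note on the override above: the teacher-distillation variant returns two
    # sets of logits (classification head and distillation head) and does not
    # accept `labels` at call time, so the labels entry is dropped for it.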
    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
    def test_for_masked_image_modeling(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)
        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
    def test_compile_tf_model(self) -> None:
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self) -> None:
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@slow
    def test_inference_image_classification_head_with_teacher(self) -> None:
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
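# For reference, the same checkpoint could also be exercised through the
# pipeline API. A hedged sketch (pipeline support for this model/framework
# combination is an assumption, not confirmed by this file):
#     from transformers import pipeline
#     classifier = pipeline("image-classification",
#                           model="snap-research/efficientformer-l1-300", framework="tf")
#     classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")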
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
class UpperCamelCase_ ( metaclass=_lowerCamelCase ):
    lowerCAmelCase_ = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['sentencepiece'] )
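# The classes above follow the standard "dummy object" pattern: importing them
# always succeeds, but instantiating one raises an ImportError telling the user
# to install the missing backend. A minimal hedged sketch of that mechanism
# (the helper name below is illustrative, not the real transformers internals):
#
#     import importlib.util
#
#     def _requires_backends_sketch(obj, backends):
#         for backend in backends:
#             if importlib.util.find_spec(backend) is None:
#                 raise ImportError(
#                     f"{type(obj).__name__} requires the {backend} library. "
#                     f"Install it with `pip install {backend}`."
#                 )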
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
    def setUp(self) -> None:
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
    def test_prepare_batch(self) -> None:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt' )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
@require_torch
    def test_prepare_batch_empty_target_text(self) -> None:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt' )
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)
@require_torch
    def test_tokenizer_as_target_length(self) -> None:
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt' )
            self.assertEqual(32, targets['input_ids'].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self) -> None:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt' )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
@require_torch
    def test_special_tokens(self) -> None:
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt' )
            targets = tokenizer(text_target=tgt_text, return_tensors='pt' )
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask(self) -> None:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)
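    # LED's `global_attention_mask` marks positions that attend to (and are
    # attended by) every other token; here it starts as all zeros, and
    # `tokenizer.pad` extends it with -1 at the padded positions of the
    # shorter sequence, which is what the expected mask above encodes.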
def lowerCAmelCase ( self ) -> Tuple:
pass
    def test_embeded_special_tokens(self) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
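# A brief note on the method above: the Sherman-Morrison formula states that,
# for an invertible matrix A and column vectors u, v,
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# provided 1 + v^T A^(-1) u != 0. `sherman_morrison` assumes `self` already
# holds A^(-1), which is why it can update the inverse without re-inverting.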
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        import doctest
        doctest.testmod()
    test1()
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self) -> None:
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self) -> None:
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ), ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ), ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ), ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ), ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ), ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
    def test_basic_tokenizer_no_lower(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens(self) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]' ), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_basic_tokenizer_splits_on_punctuation(self) -> None:
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self) -> None:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize('' ), [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ), ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ), ['[UNK]', 'runn', '##ing'] )
    def test_is_whitespace(self) -> None:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self) -> None:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self) -> None:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self) -> None:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']] )
@slow
    def test_sequence_builders(self) -> None:
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )
        text = tokenizer.encode('sequence builders', add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case' ) else False
                expected_results = (
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self) -> None:
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
UpperCAmelCase_ = """bert-base-cased"""
UpperCAmelCase_ = """google/pegasus-xsum"""
UpperCAmelCase_ = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
UpperCAmelCase_ = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
UpperCAmelCase_ = """patrickvonplaten/t5-tiny-random"""
UpperCAmelCase_ = """sshleifer/bart-tiny-random"""
UpperCAmelCase_ = """sshleifer/tiny-mbart"""
UpperCAmelCase_ = """sshleifer/tiny-marian-en-de"""
def _dump_articles(path: Path, articles: list) -> None:
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F'''{split}.source''' ), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, F'''{split}.target''' ), SUMMARIES)
    return tmp_dir
class UpperCamelCase_ ( _lowerCamelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
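    # Note on the mBART checks above: labels are laid out as
    # [tokens..., <eos>, <tgt_lang_code>], and shift_tokens_right rotates the
    # language code to position 0, giving decoder_input_ids of
    # [<tgt_lang_code>, tokens..., <eos>]; the source side symmetrically ends
    # with [<eos>, <src_lang_code>].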
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=20, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self) -> None:
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source' ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def test_dynamic_batch_size(self) -> None:
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(batch)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(F'''too many tokens in {len(failures)} batches''' )
    def test_sortish_sampler_reduces_padding(self) -> None:
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad_token_id = tokenizer.pad_token_id
        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad_token_id).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl, k='labels' )) < sum(count_pad_tokens(naive_dl, k='labels' ))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len' ).exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path='train', max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self) -> None:
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name) -> None:
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR', )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
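# Migration example (illustrative; the checkpoint name is an assumption):
#     from transformers import FlavaImageProcessor
#     image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")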
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # To compute the current row from the previous row, sweep j from right
        # to left so that c[j - 1] still holds the previous row's value.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
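# Quick sanity check against the closed form (math.comb requires Python >= 3.8,
# which is assumed here):
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252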
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCamelCase_ ( _lowerCamelCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name) -> None:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name='20220301.frr', hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name='20220301.frr', hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train'] ) )
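# The streaming path above never materialises the dataset on disk:
# `as_streaming_dataset` returns an IterableDatasetDict whose splits are read
# lazily from the hosted files, which is why the test only pulls one example.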
def set_bit(number: int, position: int) -> int:
    '''Set the bit at `position` of `number` to 1.
    >>> set_bit(0b1101, 1)
    15
    '''
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    '''Set the bit at `position` of `number` to 0.
    >>> clear_bit(0b1111, 1)
    13
    '''
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    '''Flip the bit at `position` of `number`.
    >>> flip_bit(0b1101, 1)
    15
    '''
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    '''Return True if the bit at `position` of `number` is 1.
    >>> is_bit_set(0b1010, 1)
    True
    '''
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    '''Return the bit (0 or 1) at `position` of `number`.
    >>> get_bit(0b1010, 0)
    0
    '''
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
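# Worked example: for number = 0b1010 (10),
#   set_bit(10, 0)    -> 0b1011 (11)
#   clear_bit(10, 1)  -> 0b1000 (8)
#   flip_bit(10, 3)   -> 0b0010 (2)
#   is_bit_set(10, 1) -> True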
def one_pence() -> int:
    return 1
def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(n: int = 200) -> int:
    return two_pound(n)
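# Why this counts combinations rather than permutations: each function only
# "spends" coins of its own denomination before delegating to the next smaller
# one, so every way of making the total is enumerated in exactly one
# (descending-denomination) order. For example, solution(5) counts
# {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1} once each -> 4.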
if __name__ == "__main__":
print(solution(int(input().strip())))
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue', 'mrpc' )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels' )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase__ ) == "1":
_snake_case = 2
# New Code #
_snake_case = int(args.gradient_accumulation_steps )
# Initialize accelerator
_snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCamelCase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case = config['lr']
_snake_case = int(config['num_epochs'] )
_snake_case = int(config['seed'] )
_snake_case = int(config['batch_size'] )
_snake_case = evaluate.load('glue' , 'mrpc' )
set_seed(UpperCamelCase__ )
_snake_case , _snake_case = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case = model.to(accelerator.device )
# Instantiate optimizer
_snake_case = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_snake_case = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
            # New Code #
            # We use the new `accumulate` context manager to perform gradient accumulation.
            # We also currently do not support TPUs, nor do we advise using them, as bugs were
            # found on the XLA side when running our tests; a plain-PyTorch sketch of the
            # accumulation logic follows this training function.
with accelerator.accumulate(UpperCamelCase__ ):
_snake_case = model(**UpperCamelCase__ )
_snake_case = output.loss
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCamelCase__ )
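# New Code (illustrative) #
# A plain-PyTorch sketch of the gradient accumulation performed by
# `accelerator.accumulate` above (assuming an HF-style model whose output exposes
# `.loss`): scale each minibatch loss by the accumulation factor and only step the
# optimizer every `accumulation_steps` minibatches.
def _accumulation_sketch(model, optimizer, batches, accumulation_steps):
    optimizer.zero_grad()
    for step, batch in enumerate(batches):
        loss = model(**batch).loss
        (loss / accumulation_steps).backward()  # scale so accumulated gradients average
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()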
def lowerCamelCase__ ( ) -> Dict:
'''simple docstring'''
_snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=UpperCamelCase__ , default=1 , help='The number of minibatches to be run before the accumulated gradients are applied in an optimizer step.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_snake_case = parser.parse_args()
_snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 295
|
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_snake_case = [0 for i in range(r + 1 )]
    # C(n, 0) = 1
_snake_case = 1
for i in range(1 , n + 1 ):
        # Update the row in place from the previous row, iterating right-to-left
        # so entries are not overwritten before they are used.
_snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
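# Sanity check (illustrative): the rolling Pascal-row update computes C(n, r) in
# O(n * r) time and O(r) space; math.comb agrees with the value printed above.
import math
assert math.comb(10, 5) == 252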
| 295
| 1
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCAmelCase_ = '''scheduler_config.json'''
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4
lowerCAmelCase_ = 5
lowerCAmelCase_ = 6
lowerCAmelCase_ = 7
lowerCAmelCase_ = 8
lowerCAmelCase_ = 9
lowerCAmelCase_ = 10
lowerCAmelCase_ = 11
lowerCAmelCase_ = 12
lowerCAmelCase_ = 13
lowerCAmelCase_ = 14
@dataclass
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ = 42
class UpperCamelCase_ :
lowerCAmelCase_ = SCHEDULER_CONFIG_NAME
lowerCAmelCase_ = []
lowerCAmelCase_ = True
@classmethod
def lowerCAmelCase ( cls , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[str]:
_snake_case = cls.load_config(
pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , return_commit_hash=__a , **__a , )
return cls.from_config(__a , return_unused_kwargs=__a , **__a )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , **lowerCAmelCase_ ) -> List[str]:
self.save_config(save_directory=__a , push_to_hub=__a , **__a )
@property
def lowerCAmelCase ( self ) -> List[Any]:
return self._get_compatibles()
@classmethod
def lowerCAmelCase ( cls ) -> Optional[Any]:
_snake_case = list(set([cls.__name__] + cls._compatibles ) )
_snake_case = importlib.import_module(__name__.split('.' )[0] )
_snake_case = [
getattr(__a , __a ) for c in compatible_classes_str if hasattr(__a , __a )
]
return compatible_classes
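# Illustrative sketch (not part of the public API) of the name-to-class resolution
# performed by the method above: import the root module, then getattr every listed
# name that actually exists on it.
def _resolve_names_sketch(module_name, class_names):
    module = importlib.import_module(module_name)
    return [getattr(module, name) for name in class_names if hasattr(module, name)]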
| 350
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
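    # Shape sketch (illustrative): for 1-D per-position masks q = [1, 1, 0] and
    # k = [1, 0], torch.mul(q.unsqueeze(-1), k.unsqueeze(-2)) yields a (3, 2)
    # cross-attention mask, and unsqueeze(-3) adds a broadcastable head axis -> (1, 3, 2).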
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
        # cross-attention style: concatenate the encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
        # FiLM-conditioned MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
        # Apply the FiLM-conditioned feed-forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
_snake_case = NewGELUActivation()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_snake_case = self.act(self.wi_a(lowerCAmelCase_ ) )
_snake_case = self.wi_a(lowerCAmelCase_ )
_snake_case = hidden_gelu * hidden_linear
_snake_case = self.dropout(lowerCAmelCase_ )
_snake_case = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str:
super().__init__()
_snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
_snake_case = eps
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
        # T5 uses a layer norm that only scales and doesn't shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467): the variance is
        # computed without subtracting the mean, and there is no bias. We also make sure
        # that the accumulation for half-precision inputs is done in fp32.
_snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ )
_snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
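# Standalone sketch (illustrative) of the RMS normalization above:
# y = weight * x * rsqrt(mean(x**2, dim=-1) + eps), with the mean of squares
# accumulated in fp32 before casting back to the input dtype.
def _rms_norm_sketch(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x.to(torch.float32) * torch.rsqrt(variance + eps)).to(x.dtype)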
class UpperCamelCase_ ( nn.Module ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
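# Sanity check (illustrative): the tanh formula above matches PyTorch's built-in
# tanh approximation, torch.nn.functional.gelu(x, approximate="tanh").
def _gelu_tanh_matches(x: torch.Tensor) -> bool:
    manual = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
    return bool(torch.allclose(manual, torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-6))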
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.scale_bias(lowerCAmelCase_ )
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 )
_snake_case = x * (1 + scale) + shift
return x
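# FiLM sketch (illustrative, standalone): the conditioning embedding is projected to
# per-channel (scale, shift) halves and applied as x * (1 + scale) + shift.
def _film_sketch(x: torch.Tensor, scale_shift: torch.Tensor) -> torch.Tensor:
    scale, shift = torch.chunk(scale_shift, 2, dim=-1)
    return x * (1 + scale) + shift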
| 295
| 0
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 351
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
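# Example (illustrative): attention_types=[[["global", "local"], 12]] expands to
# ["global", "local", "global", "local", ...] with 24 entries, one attention kind
# per layer, matching the default num_layers=24.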
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
import torch
_snake_case = input.size()
_snake_case = len(UpperCamelCase__ )
_snake_case = shape[dimension]
_snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1
_snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_snake_case = [slice(UpperCamelCase__ )] * rank
_snake_case = indices
_snake_case = input[s]
_snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
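# Illustrative: the slicing above reproduces torch.Tensor.unfold, e.g. a dimension of
# length 8 cut into windows of size 4 with step 4 yields a trailing shape of (2, 4):
# torch.arange(8).view(1, 8).unfold(1, 4, 4).shape == (1, 2, 4).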
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str:
'''simple docstring'''
import torch
_snake_case = torch.arange(1 , UpperCamelCase__ )
_snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = remainders == 0
_snake_case = candidates[divisor_indices]
_snake_case = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' )
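# Example (illustrative): for n = 12 the candidates are 1..11, the divisors among
# them are 1, 2, 3, 4 and 6, and the function returns (6, 12 // 6) == (6, 2).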
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the inputs in the way they appear in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
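    # Shape note (illustrative): each of the `num_layers` past_key_values entries is a
    # (key, value) pair of tensors shaped
    # (batch, num_attention_heads, seqlen + 2, hidden_size // num_attention_heads),
    # and the attention mask is extended with ones to cover those past positions.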
@property
def lowerCAmelCase ( self ) -> int:
return 13
| 295
| 0
|
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = name
_snake_case = value
_snake_case = weight
def __repr__( self ) -> int:
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def lowerCAmelCase ( self ) -> List[Any]:
return self.value
def lowerCAmelCase ( self ) -> Any:
return self.name
def lowerCAmelCase ( self ) -> Optional[Any]:
return self.weight
def lowerCAmelCase ( self ) -> List[str]:
return self.value / self.weight
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_snake_case = []
for i in range(len(__UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_snake_case = sorted(__UpperCAmelCase , key=__UpperCAmelCase , reverse=__UpperCAmelCase )
_snake_case = []
    _snake_case , _snake_case = 0.0, 0.0
for i in range(len(__UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
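# Greedy-by-density sketch (illustrative, standalone): sort (value, weight) pairs by
# value / weight and take items while they fit. Greedy is not optimal for 0/1 knapsack
# in general; here it picks (60, 5) and (50, 5) for a total value of 110.
def _greedy_sketch(items, max_cost):
    chosen, total_weight, total_value = [], 0.0, 0.0
    for value, weight in sorted(items, key=lambda t: t[0] / t[1], reverse=True):
        if total_weight + weight <= max_cost:
            chosen.append((value, weight))
            total_weight += weight
            total_value += value
    return chosen, total_value
assert _greedy_sketch([(60, 5), (50, 5), (70, 8)], 10)[1] == 110.0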
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
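# Equivalent vectorized form (illustrative): NumPy broadcasting negates every pixel
# at once, avoiding the per-pixel double loop above.
import numpy as np
assert np.array_equal(255 - np.zeros((2, 2, 3), dtype=np.uint8), np.full((2, 2, 3), 255, dtype=np.uint8))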
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 295
| 0
|
from collections.abc import Callable
def lowerCamelCase__ ( UpperCamelCase__ : Callable[[float], float] , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float:
'''simple docstring'''
_snake_case = a
_snake_case = b
    if function(__lowerCAmelCase ) == 0:  # a is a root of the function
return a
elif function(__lowerCAmelCase ) == 0:
return b
elif (
function(__lowerCAmelCase ) * function(__lowerCAmelCase ) > 0
    ):  # if neither a nor b is a root and f(a), f(b) have the same sign,
        # then there is no sign change on [a, b] and this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_snake_case = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until the bracketing interval is narrower than 10^-7
if function(__lowerCAmelCase ) == 0:
return mid
elif function(__lowerCAmelCase ) * function(__lowerCAmelCase ) < 0:
_snake_case = mid
else:
_snake_case = mid
_snake_case = start + (end - start) / 2.0
return mid
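# Worked example (illustrative, standalone): f(x) = x**3 - 2x - 5 changes sign on
# [1, 3] (f(1) = -6, f(3) = 16), so bisection converges to the root near 2.0945514815.
def _bisect_sketch(fn, lo, hi, tol=1e-7):
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if fn(lo) * fn(mid) > 0:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2.0
assert abs(_bisect_sketch(lambda x: x**3 - 2 * x - 5, 1.0, 3.0) - 2.0945514815) < 1e-6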
def lowerCamelCase__ ( UpperCamelCase__ : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 353
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
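# Illustrative helper (not used by the conversion): a fused qkv weight of shape
# (3 * dim, dim) splits into equal query / key / value thirds along dim 0, exactly
# as done above.
def _split_qkv_sketch(qkv: torch.Tensor, dim: int):
    return qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]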
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295
| 0
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=8 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=36 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self ) -> str:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.get_config()
_snake_case = 300
return config
def lowerCAmelCase ( self ) -> Any:
(
_snake_case
) = self.prepare_config_and_inputs()
_snake_case = True
_snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = MraModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> str:
_snake_case = True
_snake_case = MraModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_snake_case = MraForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = MraForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = self.num_labels
_snake_case = MraForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = self.num_labels
_snake_case = MraForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = self.num_choices
_snake_case = MraForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
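    # Shape note (illustrative): unsqueeze(1) + expand replicates each example across
    # the choices axis, turning (batch, seq_len) inputs into (batch, num_choices, seq_len).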
def lowerCAmelCase ( self ) -> str:
_snake_case = self.prepare_config_and_inputs()
(
_snake_case
) = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( a__ , unittest.TestCase ):
lowerCAmelCase_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = ()
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = MraModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def lowerCAmelCase ( self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MraModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='MRA does not output attentions' )
def lowerCAmelCase ( self ) -> List[Any]:
return
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(_lowerCamelCase )[0]
_snake_case = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(_lowerCamelCase )[0]
_snake_case = 5_0265
_snake_case = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
_snake_case = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(_lowerCamelCase )[0]
_snake_case = 5_0265
_snake_case = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
| 354
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
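# Residual connection (illustrative): the block computes activation(layer(x) + shortcut(x)),
# so with an identity shortcut the input still flows through unchanged when the learned
# branch outputs zeros, which eases optimization of deep stacks.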
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config) -> None:
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
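

# --- Usage sketch (illustration only, not part of the modeling code above) ---
# A minimal image-classification call, assuming the standard `transformers`
# ResNet classes and the public "microsoft/resnet-50" checkpoint:
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])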
"""simple docstring"""
from typing import Any
import numpy as np
def lowerCamelCase__ ( UpperCamelCase__ : np.ndarray ) -> Tuple:
'''simple docstring'''
return np.array_equal(__a , matrix.conjugate().T )
def lowerCamelCase__ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray ) -> Tuple:
'''simple docstring'''
_snake_case = v.conjugate().T
_snake_case = v_star.dot(__a )
assert isinstance(__a , np.ndarray )
return (v_star_dot.dot(__a )) / (v_star.dot(__a ))
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
_snake_case = np.array([[1], [2], [3]] )
assert is_hermitian(__a ), F'''{a} is not hermitian.'''
print(rayleigh_quotient(__a , __a ) )
_snake_case = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__a ), F'''{a} is not hermitian.'''
assert rayleigh_quotient(__a , __a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
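

# --- Worked check (illustration only) ---
# For a Hermitian matrix, the Rayleigh quotient of an eigenvector recovers the
# corresponding (real) eigenvalue, e.g. with a diagonal matrix:
#
#   a = np.array([[2.0, 0.0], [0.0, 5.0]])
#   v = np.array([[0.0], [1.0]])  # eigenvector for eigenvalue 5
#   assert np.isclose(float(rayleigh_quotient(a, v)), 5.0)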
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
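

# --- Usage sketch (illustration only) ---
# A 5-vertex graph whose adjacency matrix contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0:
#
#   graph = [
#       [0, 1, 0, 1, 0],
#       [1, 0, 1, 1, 1],
#       [0, 1, 0, 0, 1],
#       [1, 1, 0, 0, 1],
#       [0, 1, 1, 1, 0],
#   ]
#   print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]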
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(self, conditioning) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
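

# --- Usage sketch (illustration only) ---
# A minimal parameter initialisation, assuming the module above corresponds to
# diffusers' `FlaxControlNetModel`:
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))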
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
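

# Example invocation (paths and script name are placeholders):
#   python convert_ldm_original.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline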
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # up blocks additionally take the channel count of the previous output
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma^2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
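

# Worked check (illustration only): at the mean of a standard normal,
#   gaussian(0) == 1 / sqrt(2 * pi) ~= 0.3989422804014327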
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
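

# Sanity note (illustration only): the expected MST above spans all 9 nodes with
# 8 edges and total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.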
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7,
        is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99,
        hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3,
        num_choices=4, scope=None, range_bbox=1000,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ) -> bool:
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self) -> None:
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self) -> None:
        self.config_tester.run_common_tests()
    def test_loss_computation(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self) -> None:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self) -> None:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self) -> None:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self) -> None:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self) -> None:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Loads the sample COCO image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self) -> None:
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all (optionally empty) contiguous subarrays."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path: str, gpt2_config_file: str, pytorch_dump_folder_path: str) -> None:
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
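

# Example invocation (paths are placeholders):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./ckpt/model.ckpt --pytorch_dump_folder_path ./gpt2-pytorch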
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
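

# --- Usage sketch (illustration only) ---
# This class backs the high-level `pipeline("text-generation", ...)` API:
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])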
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
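

# Worked check (illustration only): the cosine schedule starts near zero and is
# clipped at max_beta at the end of the range:
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,)
#   assert betas[0] < betas[-1] <= 0.999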
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ) -> None:
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self) -> int:
        return self.config.num_train_timesteps
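
# --- Hypothetical usage sketch (an addition, not part of the original file) ---
# Minimal denoising loop for the scheduler above. The zero tensor stands in for a
# real `unet(model_input, t).sample` noise prediction so the snippet stays
# self-contained; with a real UNet the loop structure is identical.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a noise-prediction model
        sample = scheduler.step(noise_pred, t, sample).prev_sample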
| 361
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
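
# --- Illustrative check (an addition, not in the original file) ---
# GLEU is min(precision, recall) over n-grams, i.e. matches / max(|hyp|, |ref|)
# n-grams. With unigrams only: hypothesis ["the", "cat"] vs reference
# ["the", "cat", "sat"] has 2 matches, precision 2/2 and recall 2/3 -> GLEU 2/3.
def _demo_gleu() -> None:
    score = gleu_score.corpus_gleu(
        list_of_references=[[["the", "cat", "sat"]]],
        hypotheses=[["the", "cat"]],
        min_len=1,
        max_len=1,
    )
    assert abs(score - 2 / 3) < 1e-9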
| 295
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlm_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
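
# --- Hypothetical usage sketch (an addition, not in the original file) ---
# Outside the test suite the processor is driven the same way; with the default
# `apply_ocr=True` it returns OCR words and normalized boxes next to pixel values:
#
#     image_processor = LayoutLMv3ImageProcessor()
#     encoding = image_processor(image, return_tensors="pt")  # `image` is a PIL.Image
#     pixel_values, words, boxes = encoding.pixel_values, encoding.words, encoding.boxes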
| 362
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, avoiding
    whitespace/control characters that would confuse the BPE code.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
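
# --- Illustrative check (an addition, not in the original file) ---
# The mapping above covers all 256 byte values: printable bytes map to themselves
# and "ugly" bytes (whitespace/control) get shifted to printable code points, so
# any utf-8 string can round-trip through a BPE vocabulary.
def _demo_bytes_to_unicode() -> None:
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256
    assert byte_encoder[ord("A")] == "A"  # printable bytes are unchanged
    assert byte_encoder[ord(" ")] == "Ġ"  # the space byte gets a printable stand-in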
def get_pairs(word: tuple) -> set:
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
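
# --- Hypothetical usage sketch (an addition, not in the original file) ---
# Standard slow-tokenizer round trip, assuming access to the `facebook/bart-base`
# checkpoint listed in the vocab maps above.
if __name__ == "__main__":
    tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tokenizer("Hello world")["input_ids"]  # e.g. [0, 31414, 232, 2]
    print(tokenizer.decode(ids, skip_special_tokens=True))  # -> "Hello world"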
| 295
| 0
|
"""simple docstring"""
from math import sqrt
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> bool:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
_snake_case = True
# 0 and 1 are none primes.
if number <= 1:
_snake_case = False
for divisor in range(2 , int(round(sqrt(_lowerCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_snake_case = False
break
# precondition
assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'status' must been from type bool"
return status
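
# --- Illustrative check (an addition, not in the original file) ---
# Trial division only needs divisors up to sqrt(number): any composite n = a * b
# has at least one factor <= sqrt(n).
def _demo_is_prime() -> None:
    assert [n for n in range(10) if is_prime(n)] == [2, 3, 5, 7]
    assert not is_prime(97 * 89)  # a product of two primes is composite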
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all prime numbers from 2 up to n (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Returns all primes between 2 and n via `is_prime` (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def prime_factorization(number: int) -> list:
    """Returns the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be the return of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Returns the greatest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Returns the smallest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's assumption: returns two primes whose sum equals the even input."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1
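
# --- Worked example (an addition, not in the original file) ---
# Euclid's algorithm replaces (a, b) by (b, a mod b) until b == 0:
# gcd(252, 105): 252 % 105 = 42 -> 105 % 42 = 21 -> 42 % 21 = 0, hence 21.
def _demo_gcd() -> None:
    assert gcd(252, 105) == 21
    assert gcd(105, 252) == 21  # the order of the arguments does not matter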
def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captures numbers handled in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans is not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Returns all primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans
def get_divisors(n: int) -> list:
    """Returns all divisors of n (including 1 and n itself), for n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """A perfect number equals the sum of its proper divisors (e.g. 6 = 1 + 2 + 3)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduces a fraction to lowest terms by dividing through the gcd."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Returns the n-th Fibonacci number, computed iteratively (fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
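
# --- Illustrative check (an addition, not in the original file) ---
# Note the indexing convention: fib(0) == fib(1) == 1, so the sequence produced
# is 1, 1, 2, 3, 5, 8, 13, ...
if __name__ == "__main__":
    assert [fib(n) for n in range(7)] == [1, 1, 2, 3, 5, 8, 13]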
| 363
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the weights of the old checkpoint into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
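
# --- Illustrative sketch (an addition, not in the original file) ---
# The converter splits the legacy fused attention projection `in_proj_weight`
# (shape [3 * embed_dim, embed_dim]) into separate query/key/value matrices,
# mirroring the slicing done above for `query_proj`/`key_proj`/`value_proj`.
def _demo_in_proj_split() -> None:
    import torch

    embed_dim = 4
    in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
    q = in_proj_weight[:embed_dim, :]
    k = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v = in_proj_weight[2 * embed_dim :, :]
    assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)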
| 295
| 0
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase__ ( min_version : str ) -> None:
    '''simple docstring'''
    # assumes `from packaging import version` and `from . import __version__`
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
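# Illustrative usage of the version guard above (the version string is
# hypothetical): example scripts call it once at import time.
#
#     lowerCamelCase__('4.21.0.dev0')   # raises ImportError on an older install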
| 364
|
import random
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict:
'''simple docstring'''
_snake_case = {i: [] for i in range(UpperCamelCase__ )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase__ )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j) with i < j, add an edge from i to j
    # when the randomly generated number is less than `probability`
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if random.random() < probability:
graph[i].append(UpperCamelCase__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(UpperCamelCase__ )
return graph
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
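# Illustrative usage, assuming the two functions above are
# `random_graph(vertices_number, probability, directed=False)` and
# `complete_graph(vertices_number)`, as their bodies suggest:
#
#     random.seed(1)
#     print(random_graph(4, 0.5))    # e.g. {0: [1, 2], 1: [0], 2: [0], 3: []}
#     print(complete_graph(3))       # {0: [1, 2], 1: [0, 2], 2: [0, 1]}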
| 295
| 0
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : tuple , UpperCamelCase__ : Path , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=False , ) -> str:
'''simple docstring'''
output_path.parent.mkdir(parents=__a , exist_ok=__a )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__a , __a , f=output_path.as_posix() , input_names=__a , output_names=__a , dynamic_axes=__a , do_constant_folding=__a , use_external_data_format=__a , enable_onnx_checker=__a , opset_version=__a , )
else:
export(
__a , __a , f=output_path.as_posix() , input_names=__a , output_names=__a , dynamic_axes=__a , do_constant_folding=__a , opset_version=__a , )
@torch.no_grad()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ) -> Tuple:
'''simple docstring'''
    _snake_case = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        _snake_case = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        _snake_case = 'cpu'
_snake_case = Path(__a )
# VAE DECODER
_snake_case = AutoencoderKL.from_pretrained(model_path + '/vae' )
_snake_case = vae_decoder.config.latent_channels
# forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
onnx_export(
__a , model_args=(
torch.randn(1 , __a , 25 , 25 ).to(device=__a , dtype=__a ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=__a , )
del vae_decoder
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
UpperCAmelCase_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
| 365
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
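    # Sketch of the introspection used above: `inspect.signature` preserves
    # declaration order, so the first parameter name of `model.call` is stable:
    #
    #     list(inspect.signature(lambda pixel_values, training=False: None).parameters)
    #     # -> ['pixel_values', 'training']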
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 295
| 0
|
from ...configuration_utils import PretrainedConfig
UpperCAmelCase_ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( PretrainedConfig ):
lowerCAmelCase_ = "tapas"
def __init__( self , lowerCAmelCase_=3_0522 , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3072 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1024 , lowerCAmelCase_=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=0 , lowerCAmelCase_=10.0 , lowerCAmelCase_=0 , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=1.0 , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_="ratio" , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=64 , lowerCAmelCase_=32 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Optional[int]:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_sizes
_snake_case = initializer_range
_snake_case = layer_norm_eps
# Fine-tuning task hyperparameters
_snake_case = positive_label_weight
_snake_case = num_aggregation_labels
_snake_case = aggregation_loss_weight
_snake_case = use_answer_as_supervision
_snake_case = answer_loss_importance
_snake_case = use_normalized_answer_loss
_snake_case = huber_loss_delta
_snake_case = temperature
_snake_case = aggregation_temperature
_snake_case = use_gumbel_for_cells
_snake_case = use_gumbel_for_aggregation
_snake_case = average_approximation_function
_snake_case = cell_selection_preference
_snake_case = answer_loss_cutoff
_snake_case = max_num_rows
_snake_case = max_num_columns
_snake_case = average_logits_per_cell
_snake_case = select_one_column
_snake_case = allow_empty_column_selection
_snake_case = init_cell_selection_weights_to_zero
_snake_case = reset_position_index_per_cell
_snake_case = disable_per_token_loss
# Aggregation hyperparameters
_snake_case = aggregation_labels
_snake_case = no_aggregation_label_index
if isinstance(self.aggregation_labels , __UpperCAmelCase ):
_snake_case = {int(__UpperCAmelCase ): v for k, v in aggregation_labels.items()}
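# Illustrative construction of the config class above (published in
# transformers as `TapasConfig`; the keyword values here are made-up examples):
#
#     config = UpperCamelCase_(num_aggregation_labels=4, use_answer_as_supervision=True)
#     print(config.aggregation_labels, config.no_aggregation_label_index)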
| 366
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
            _snake_case = [[0] * len(x ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
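# A minimal sketch of the global-attention padding behaviour asserted in the
# test above (the checkpoint id is the real one this test uses; values are
# not re-verified here):
#
#     tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
#     batch = tok(['Summary of the text.', 'Another summary.'], padding=True)
#     batch['global_attention_mask'] = [[0] * len(ids) for ids in batch['input_ids']]
#     padded = tok.pad(batch)   # the shorter mask is padded with -1, per the test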
| 295
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> Dict:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Any:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase__ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
_snake_case = {"hidden_states": hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase__ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase__ , device=lowerCAmelCase__ )
return dummy_input
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase__ )
unet_block.to(lowerCAmelCase__ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase__ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_snake_case = model(**lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase__ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase__ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase__ , lowerCAmelCase__ )
loss.backward()
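# Note on the shape contract above: the dummy hidden states are
# (batch=4, channels=32, 32, 32); a 'down' block is expected to halve the
# spatial resolution to (4, 32, 16, 16), 'mid' keeps (4, 32, 32, 32), and
# 'up' doubles it to (4, 32, 64, 64).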
| 367
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
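    # Restating the greedy longest-match-first behaviour asserted above (same
    # toy vocab): a word with any unsegmentable remainder becomes [UNK] whole.
    #
    #     WordpieceTokenizer(vocab=vocab, unk_token='[UNK]').tokenize('unwantedX')
    #     # -> ['[UNK]']   -- not ['un', '##want', '##ed', '[UNK]']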
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 295
| 0
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase_ = HfApi()
UpperCAmelCase_ = {}
# fmt: off
UpperCAmelCase_ = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
UpperCAmelCase_ = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
UpperCAmelCase_ = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
UpperCAmelCase_ = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
UpperCAmelCase_ = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
UpperCAmelCase_ = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
UpperCAmelCase_ = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
UpperCAmelCase_ = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
UpperCAmelCase_ = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
UpperCAmelCase_ = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
UpperCAmelCase_ = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
UpperCAmelCase_ = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
UpperCAmelCase_ = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
UpperCAmelCase_ = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
UpperCAmelCase_ = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
UpperCAmelCase_ = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase_ = '/home/patrick/google_checkpoints/' + mod.modelId.split("""/""")[-1]
print(F"Started running {mod.modelId}!!!")
if mod.modelId.startswith("""CompVis"""):
UpperCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
UpperCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase_ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F"{mod.modelId} has passed successfully!!!")
| 368
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
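# Illustrative: constructing the shim above (published as
# `FlavaFeatureExtractor`) emits the FutureWarning and otherwise behaves like
# `FlavaImageProcessor`; a sketch, assuming default construction works:
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter('always')
#         _ = UpperCamelCase_()           # default construction, no checkpoint
#     assert any(issubclass(w.category, FutureWarning) for w in caught)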
| 295
| 0
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = PhobertTokenizer
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
_snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_snake_case = ['''#version: 0.2''', '''l à</w>''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCamelCase ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
_snake_case = '''Tôi là VinAI Research'''
_snake_case = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case = '''Tôi là VinAI Research'''
_snake_case = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
_snake_case = tokenizer.tokenize(_lowerCamelCase )
print(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
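# Illustrative: PhoBERT uses fastBPE's *trailing* '@@' to mark non-final
# subword pieces, the mirror image of BERT's leading '##' (splits taken from
# the expected tokens in the test above):
#
#     'Tôi'      -> ['T@@', 'ô@@', 'i']
#     'Research' -> ['R@@', 'e@@', 's@@', 'e@@', 'a@@', 'r@@', 'c@@', 'h']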
| 369
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
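# Illustrative: the helper above yields entries shaped like
#
#     {'testcase_name': 'wikipedia/20220301.de',
#      'dataset': 'wikipedia', 'config_name': '20220301.de'}
#
# which absl's parameterized.named_parameters expands into one test per entry.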
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
with TemporaryDirectory() as tmp_dir:
_snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
_snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
_snake_case = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
_snake_case = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
_snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_snake_case = None
builder_instance.download_and_prepare()
_snake_case = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_snake_case = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert "train" in ds
assert isinstance(ds['train'] , UpperCamelCase__ )
assert next(iter(ds['train'] ) )
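# Illustrative public-API equivalent of the streaming test above (same dataset
# and config as the test; requires network access):
#
#     from datasets import load_dataset
#     ds = load_dataset('wikipedia', '20220301.frr', streaming=True)
#     print(next(iter(ds['train'])))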
| 295
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Optional[int]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Optional[int]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> List[Any]:
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    def test_model( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Tuple:
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ) -> str:
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
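# Minimal usage sketch (illustrative, not part of the original test file; assumes
# network access to the public 'albert-base-v2' checkpoint used above):
#
#     from transformers import AlbertTokenizer
#     tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
#     model = AlbertModel.from_pretrained('albert-base-v2')
#     inputs = tokenizer('Hello, ALBERT!', return_tensors='pt')
#     with torch.no_grad():
#         hidden = model(**inputs)[0]  # shape: (batch, seq_len, hidden_size=768)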
def one_pence( ) -> int:
    '''simple docstring'''
    return 1
def two_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution( x : int = 200 ) -> int:
    '''simple docstring'''
    return two_pound(x )
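# An equivalent bottom-up sketch (illustrative, added for comparison): the classic
# dynamic-programming count of ways to reach `total` pence with the same coin set,
# avoiding the deep mutual recursion of the functions above. solution_dp(200)
# returns the same value as solution(200).
def solution_dp(total: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * total  # ways[0] = 1: the empty selection makes 0p
    for coin in coins:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]
    return ways[total]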
if __name__ == "__main__":
print(solution(int(input().strip())))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "▁"
UpperCAmelCase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
UpperCAmelCase_ = {
"google/pegasus-xsum": 512,
}
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ) -> Union[str, Any]:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F'''additional_special_tokens should be of type {type(list )}, but is'''
                    F''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
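# Usage sketch (illustrative; the class above corresponds to PegasusTokenizerFast
# upstream, and 'google/pegasus-xsum' is the checkpoint referenced in the maps above):
#
#     tok = PegasusTokenizerFast.from_pretrained('google/pegasus-xsum')
#     ids = tok('PEGASUS was pre-trained with gap-sentence generation.').input_ids
#     assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>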
def binomial_coefficient( n : int , r : int ) -> int:
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
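# Sanity check (illustrative, added for verification): the Pascal's-row computation
# above should agree with the closed-form value from the standard library.
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252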
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase_ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ) -> List[str]:
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Any:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase ( self ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase ( self ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_local()
def lowerCAmelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , input_dims = 128 , targets_length = 256 , max_decoder_noise_time = 20_00.0 , d_model = 768 , num_layers = 12 , num_heads = 12 , d_kv = 64 , d_ff = 2048 , dropout_rate = 0.1 , ) -> Union[str, Any]:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> Tuple:
        # outer product of the two 1-D masks gives a (batch, query_len, key_len) mask
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> Optional[Any]:
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y_mask )) for x, y_mask in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> Tuple:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> Tuple:
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> Tuple:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> str:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> str:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ) -> Dict:
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> List[str]:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ) -> Union[str, Any]:
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ) -> Union[str, Any]:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> Any:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ) -> str:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> int:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ) -> Any:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> Optional[Any]:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
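# A self-contained FiLM sketch (illustrative, not part of the original module): the
# conditioning embedding is projected to per-channel (scale, shift) pairs and applied
# as x * (1 + scale) + shift, leaving the feature shape unchanged.
def _film_demo() -> torch.Tensor:
    proj = nn.Linear(8 , 4 * 2 , bias=False )  # conditioning dim 8 -> scale/shift of size 4 each
    x , cond = torch.randn(2 , 10 , 4 ) , torch.randn(2 , 1 , 8 )
    scale , shift = torch.chunk(proj(cond ) , 2 , dim=-1 )
    return x * (1 + scale) + shift  # shape (2, 10, 4)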
def kinetic_energy( mass : float , velocity : float ) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
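    # Worked example (illustrative): a 10 kg mass moving at 5 m/s carries
    # 0.5 * 10 * 5 * 5 = 125 J of kinetic energy.
    print(kinetic_energy(10, 5))  # 125.0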
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( PretrainedConfig ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=5_0257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> Tuple:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ) -> Any:
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
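    # Example (illustrative): attention_types=[[["global", "local"], 12]] expands to
    # ["global", "local", "global", "local", ...] with 24 entries in total, i.e. one
    # attention flavor per layer for the default num_layers=24.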
def custom_unfold( input : Tuple , dimension : int , size : Tuple , step : Optional[int] ) -> Any:
    '''simple docstring'''
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length : Tuple , window_size : Dict ) -> str:
    '''simple docstring'''
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
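# Example (illustrative): unfolding a tensor of shape (1, 8) along dimension 1 with
# size=4 and step=4, custom_unfold returns shape (1, 2, 4) — the same result as
# torch.Tensor.unfold(1, 4, 4) — while remaining traceable for ONNX export.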
class UpperCamelCase_ ( OnnxConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads( self ) -> int:
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(UpperCamelCase_ , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
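# Export sketch (illustrative; the command assumes the transformers.onnx CLI, which
# consumes this OnnxConfigWithPast subclass):
#
#     python -m transformers.onnx --model=EleutherAI/gpt-neo-1.3B --feature=causal-lm onnx/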
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCAmelCase_ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class UpperCamelCase_ ( PretrainedConfig ):
lowerCAmelCase_ = '''albert'''
    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> Tuple:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCamelCase_ ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img : Dict ) -> Optional[Any]:
    '''simple docstring'''
    rows , cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows ):
        for j in range(cols ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
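# Vectorized alternative (illustrative): with a NumPy image array the whole negative
# is a single broadcast subtraction, e.g. `negative = 255 - img`, instead of the
# per-pixel loop above.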
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
    import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ) -> str:
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP )
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> str:
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> List[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 1001 )
    def test_vocab_size( self ) -> Optional[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
    def test_full_tokenizer( self ) -> List[Any]:
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [289, 50, 14, 174, 386] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def lowerCAmelCase ( self ) -> Any:
_snake_case = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class UpperCamelCase_ ( unittest.TestCase ):
    checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
    french_text = '''C\'est trop cool'''
    spanish_text = '''Esto es genial'''
    @classmethod
    def setUpClass( cls ) -> Optional[int]:
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def test_lang_code_to_id( self ) -> List[str]:
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
    def test_vocab_size( self ) -> str:
        self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
    def test_tokenizer_decode_ignores_language_codes( self ) -> int:
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_adds_special_tokens( self ) -> Optional[int]:
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter( self ) -> Dict:
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name : str ) -> VideoMAEConfig:
    '''simple docstring'''
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name : str , config : VideoMAEConfig ) -> int:
    '''simple docstring'''
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def rename_key( name : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def convert_state_dict( orig_state_dict : Any , config : List[Any] ) -> Union[str, Any]:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith('encoder.' ):
            key = key.replace('encoder.' , '' )
        if "qkv" in key:
            key_split = key.split('.' )
            if key.startswith('decoder.blocks' ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[F'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_video( ) -> Union[str, Any]:
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint( checkpoint_url : List[str] , pytorch_dump_folder_path : Union[str, Any] , model_name : Optional[int] , push_to_hub : Optional[int] ) -> List[Any]:
    '''simple docstring'''
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location='cpu' )
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors='pt' )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
        inputs['bool_masked_pos'] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
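# A hedged, minimal sketch (an assumption, not code from the original
# conversion script) of what `norm_pix_loss` refers to above: with normalized
# targets, each patch of pixel values is standardized by its own mean and
# variance before the reconstruction loss is computed.
def normalize_patch_targets_sketch(patches, eps=1e-6):
    # patches: float tensor of shape (batch, num_patches, patch_dim)
    mean = patches.mean(dim=-1, keepdim=True)
    var = patches.var(dim=-1, keepdim=True)
    return (patches - mean) / (var + eps) ** 0.5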
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 ) -> Optional[Any]:
'''simple docstring'''
_snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
_snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCamelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
_snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case = 16
elif accelerator.mixed_precision != "no":
_snake_case = 8
else:
_snake_case = None
return tokenizer.pad(
UpperCamelCase__ , padding='longest' , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
_snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> str:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase__ ) == "1":
_snake_case = 2
# New Code #
_snake_case = int(args.gradient_accumulation_steps )
# Initialize accelerator
_snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCamelCase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case = config['lr']
_snake_case = int(config['num_epochs'] )
_snake_case = int(config['seed'] )
_snake_case = int(config['batch_size'] )
_snake_case = evaluate.load('glue' , 'mrpc' )
set_seed(UpperCamelCase__ )
_snake_case , _snake_case = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case = model.to(accelerator.device )
# Instantiate optimizer
_snake_case = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_snake_case = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCamelCase__ ):
_snake_case = model(**UpperCamelCase__ )
_snake_case = output.loss
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
_snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=UpperCamelCase__ , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_snake_case = parser.parse_args()
_snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
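# A hedged, self-contained sketch (an assumption, not the Accelerate
# internals) of the manual pattern that `accelerator.accumulate(model)`
# replaces above: scale each minibatch loss by the number of accumulation
# steps and only step the optimizer once per group, approximating one
# larger batch.
def manual_accumulation_sketch(accumulation_steps: int = 4):
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for step in range(2 * accumulation_steps):
        batch = torch.randn(8, 4)
        loss = model(batch).pow(2).mean() / accumulation_steps
        loss.backward()  # gradients keep accumulating until zero_grad()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()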
| 354
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 295
| 0
|
"""simple docstring"""
import os
import sys
import unittest
UpperCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCAmelCase_ = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
UpperCAmelCase_ = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = {"""BertModelTest""": """BertModelTester"""}
_snake_case = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
_snake_case = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase ( self ) -> str:
_snake_case = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_snake_case = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
_snake_case = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 355
|
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool:
'''simple docstring'''
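    # 1. Validate that path exists between current and next vertices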
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]:
'''simple docstring'''
_snake_case = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
    # evaluate the path: return it if a Hamiltonian cycle is found, otherwise return an empty list
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
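# Hedged usage sketch, kept as comments (assumes the last function above is
# named `hamilton_cycle`, a hypothetical name; its helpers are invoked as
# `valid_connection` and `util_hamilton_cycle` in the bodies above): the
# 4-cycle below admits the Hamiltonian cycle 0 -> 1 -> 2 -> 3 -> 0.
#
#     graph = [
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#     ]
#     print(hamilton_cycle(graph))  # [0, 1, 2, 3, 0]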
| 295
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[list[str]] , UpperCamelCase__ : int , ) -> None:
'''simple docstring'''
_snake_case = len(lowercase__ )
    # If row is equal to the size of the board it means there is a queen in
    # each row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible results for that row
for col in range(lowercase__ ):
        # We apply what we learned previously. First we check that the current
        # column is not already in the board (possible_board), because a repeated
        # column would mean a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowercase__ , lowercase__ , )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> None:
'''simple docstring'''
_snake_case = []
depth_first_search([] , [] , [] , lowercase__ , lowercase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowercase__ )
print('' )
print(len(lowercase__ ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
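# Hedged worked check (not part of the original script) of the diagonal
# formulas used above: queens at (row=0, col=1) and (row=1, col=2) share a
# 45º diagonal because row - col is -1 for both, while queens at (0, 1) and
# (1, 0) share a 135º diagonal because row + col is 1 for both.
assert 0 - 1 == 1 - 2  # same 45º diagonal
assert 0 + 1 == 1 + 0  # same 135º diagonal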
| 356
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 295
| 0
|
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str = " " ) -> Optional[Any]:
'''simple docstring'''
_snake_case = []
_snake_case = 0
for index, char in enumerate(A__ ):
if char == separator:
split_words.append(string[last_index:index] )
_snake_case = index + 1
elif index + 1 == len(A__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 357
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 0
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase_ = 2
class UpperCamelCase_ :
def __init__( self , *, # begin keyword-only arguments
lowerCAmelCase_="<s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_=None , ) -> Tuple:
_snake_case , _snake_case , _snake_case , _snake_case = bos, unk, pad, eos
_snake_case = []
_snake_case = []
_snake_case = {}
_snake_case = self.add_symbol(__lowerCAmelCase )
_snake_case = self.add_symbol(__lowerCAmelCase )
_snake_case = self.add_symbol(__lowerCAmelCase )
_snake_case = self.add_symbol(__lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__lowerCAmelCase )
_snake_case = len(self.symbols )
def __eq__( self , lowerCAmelCase_ ) -> Tuple:
return self.indices == other.indices
def __getitem__( self , lowerCAmelCase_ ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> Tuple:
return len(self.symbols )
def __contains__( self , lowerCAmelCase_ ) -> int:
return sym in self.indices
@classmethod
def lowerCAmelCase ( cls , lowerCAmelCase_ ) -> int:
_snake_case = cls()
d.add_from_file(__lowerCAmelCase )
return d
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False ) -> List[Any]:
if word in self.indices and not overwrite:
_snake_case = self.indices[word]
_snake_case = self.count[idx] + n
return idx
else:
_snake_case = len(self.symbols )
_snake_case = idx
self.symbols.append(__lowerCAmelCase )
self.count.append(__lowerCAmelCase )
return idx
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
return 0
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(__lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(__lowerCAmelCase ) )
return
_snake_case = f.readlines()
_snake_case = self._load_meta(__lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
_snake_case , _snake_case = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_snake_case = True
_snake_case , _snake_case = line.rsplit(' ' , 1 )
else:
_snake_case = False
_snake_case = int(__lowerCAmelCase )
_snake_case = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(__lowerCAmelCase ) )
self.add_symbol(__lowerCAmelCase , n=__lowerCAmelCase , overwrite=__lowerCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
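    # Convert from fairseq's BPE convention, where a trailing "@@" marks a
    # continued word-piece, to the convention where "</w>" marks the end of a
    # word and continuation pieces are left unmarked.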
_snake_case = dict((re.sub(R'@@$' , '' , UpperCamelCase__ ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , UpperCamelCase__ ), v) for k, v in d.items() )
_snake_case = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
_snake_case = d[k] # restore
return da
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if not os.path.exists(UpperCamelCase__ ):
raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_snake_case = os.path.join(UpperCamelCase__ , 'checkpoint.pt' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
_snake_case = chkpt['cfg']['model']
# dicts
_snake_case = os.path.join(UpperCamelCase__ , 'dict.txt' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(F'''path to the file {dict_file} does not exist!''' )
_snake_case = Dictionary.load(UpperCamelCase__ )
_snake_case = rewrite_dict_keys(src_dict.indices )
_snake_case = len(UpperCamelCase__ )
_snake_case = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['vocab_file'] )
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# merges_file (bpecodes)
_snake_case = os.path.join(UpperCamelCase__ , 'bpecodes' )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' )
_snake_case = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ )
# model config
_snake_case = os.path.join(UpperCamelCase__ , 'config.json' )
_snake_case = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'''Generating {biogpt_model_config_file}''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# tokenizer config
_snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'''Generating {biogpt_tokenizer_config_file}''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) )
# model
_snake_case = chkpt['model']
# remove unneeded keys
_snake_case = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_snake_case = model_state_dict.pop(UpperCamelCase__ )
else:
_snake_case = model_state_dict.pop(UpperCamelCase__ )
_snake_case = BioGptConfig.from_pretrained(UpperCamelCase__ )
_snake_case = BioGptForCausalLM(UpperCamelCase__ )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase__ )
# save
_snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
print('Conversion is done!' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 358
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
| 295
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase_ = 'gpt_neox_japanese'
def __init__( self , lowerCAmelCase_=3_2000 , lowerCAmelCase_=2560 , lowerCAmelCase_=32 , lowerCAmelCase_=32 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=1.00 , lowerCAmelCase_=1_0000 , lowerCAmelCase_=2048 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=True , lowerCAmelCase_=3_1996 , lowerCAmelCase_=3_1999 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , **lowerCAmelCase_ , ) -> Optional[int]:
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_multiple_size
_snake_case = hidden_act
_snake_case = rotary_pct
_snake_case = rotary_emb_base
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = use_cache
_snake_case = attention_dropout
_snake_case = hidden_dropout
| 359
|
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
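    # Kadane's algorithm: a single pass that extends the best subarray ending
    # at the current element and tracks the best sum seen so far.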
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 0
|
from PIL import Image
def lowerCamelCase__ ( UpperCamelCase__ : Image ) -> Image:
'''simple docstring'''
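    # Two-pass global thresholding: first compute the mean intensity over all
    # pixels, then map each pixel to white (255) if it is above the mean and
    # to black (0) otherwise.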
_snake_case , _snake_case = image.size
_snake_case = 0
_snake_case = image.load()
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
_snake_case = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
_snake_case = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
UpperCAmelCase_ = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 360
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
                    F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter, expected'''
                    ' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
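        # The "hole" strategy truncates the prompt from the left so that the
        # remaining prompt plus the requested number of new tokens still fits
        # within the model's maximum sequence length.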
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
| 295
| 0
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "" , ) -> bool:
    '''simple docstring'''
    return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
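# Hand-checked examples for the Counter-based variant above (not part of the
# original file): "Momo" lowercases to counts {m: 2, o: 2} with zero odd
# counts, while "Mother" has six characters that each occur exactly once.
# >>> can_string_be_rearranged_as_palindrome_counter("Momo")
# True
# >>> can_string_be_rearranged_as_palindrome_counter("Mother")
# False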
def can_string_be_rearranged_as_palindrome(input_str: str = "" ) -> bool:
    '''simple docstring'''
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(' ' , '' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True
def benchmark(input_str: str = "" ) -> None:
    '''simple docstring'''
    print('\nFor string = ' , input_str , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(input_str ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
    print(
        '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(input_str ) , '\ttime =' , timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 361
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
    def lowerCAmelCase ( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 295
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
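# Behavioural note (not from the original file): with the _LazyModule hook
# above, `from transformers.models.pix2struct import Pix2StructConfig` defers
# the heavy torch-dependent submodule imports until the attribute is first
# accessed, keeping module import cheap.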
| 362
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode( ) -> dict:
    '''simple docstring'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
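# Hand-checked sanity examples for the table above: printable ASCII maps to
# itself, while bytes outside the kept ranges are shifted into the 256+ plane.
# >>> bytes_to_unicode()[ord("A")]
# 'A'
# >>> bytes_to_unicode()[0]  # NUL is the first excluded byte -> chr(256)
# 'Ā'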
def get_pairs(word: tuple ) -> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
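# Example (hand-checked): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} - every adjacent symbol
# pair of the word, deduplicated.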
class UpperCamelCase_ ( PreTrainedTokenizer ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase ( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
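    # Merge walk-through for the loop above (hand-traced, with hypothetical
    # ranks not taken from a real vocabulary): given bpe_ranks
    # {("l", "o"): 0, ("lo", "w"): 1}, the token "low" evolves as
    # ("l", "o", "w") -> ("lo", "w") -> ("low",) and is cached as "low".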
    def lowerCAmelCase ( self , text ) -> List[Any]:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
    def lowerCAmelCase ( self , tokens ) -> Union[str, Any]:
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def lowerCAmelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def lowerCAmelCase ( self , text , is_split_into_words=False , **kwargs ) -> str:
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 295
| 0
|
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n: int ) -> None:
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd(n: int ) -> None:
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )
def pretty_print(n: int ) -> None:
    '''simple docstring'''
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
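# Hand-traced output of pretty_print(3), trailing spaces trimmed:
#   *
#  * *
# * * *
# * * *
#  * *
#   *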
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number, and see the magic: """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 363
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
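# Note on the in_proj split above (illustrative): the old checkpoints store a
# fused attention projection of shape (3 * embed_dim, embed_dim); rows
# [0:embed_dim] hold the query weights, [embed_dim:2*embed_dim] the key
# weights, and [2*embed_dim:] the value weights, which is exactly how the
# slices are assigned to query_proj/key_proj/value_proj.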
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 295
| 0
|
import d4rl  # noqa; registers the D4RL locomotion environments with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = """hopper-medium-v2"""
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        """bglick13/hopper-medium-v2-value-function-hor32""",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 364
|
import random
def random_graph(nodes_number: int , probability: float , directed: bool = False ) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(nodes_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is smaller than probability
    for i in range(nodes_number ):
        for j in range(i + 1 , nodes_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph(graph_size: int ) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(graph_size ) if i != j] for i in range(graph_size )
    }
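# Hand-checked examples: complete_graph(3) returns
# {0: [1, 2], 1: [0, 2], 2: [0, 1]}, and random_graph(4, 1) degenerates to
# complete_graph(4) because probability >= 1 short-circuits the random draws.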
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
| 0
|
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ' , '' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
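# Quick hand-checked examples for the set-comprehension variant above:
# >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog")
# True
# >>> is_pangram_fastest("The quick brown fox")
# False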
def benchmark( ) -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit('is_pangram()' , setup=setup ) )
    print(timeit('is_pangram_faster()' , setup=setup ) )
    print(timeit('is_pangram_fastest()' , setup=setup ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 365
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase ( self , config , pixel_values , labels ) -> List[Any]:
        model = TFEfficientFormerModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase ( self , config , pixel_values , labels ) -> Any:
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
    def lowerCAmelCase ( self ) -> str:
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
    def lowerCAmelCase ( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def lowerCAmelCase ( self ) -> Optional[Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            if hasattr(self.model_tester , 'encoder_seq_length' ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple) )
                self.assertEqual(len(hidden_states ) , expected_num_layers )
                seq_len = getattr(self.model_tester , 'seq_length' , None )
                decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , seq_len )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def lowerCAmelCase ( self , inputs_dict , model_class , return_labels=False ) -> List[Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowerCAmelCase ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
    def lowerCAmelCase ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def lowerCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def lowerCAmelCase ( self ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , 'seq_length' , None )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , seq_len )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        chunk_length = getattr(self.model_tester , 'chunk_length' , None )
        if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs )
            self.assertTrue(outputs_dict is not None )
def prepare_img( ) -> "Image.Image":
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 295
| 0
|
def valid_connection(graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    '''simple docstring'''
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    '''simple docstring'''
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    '''simple docstring'''
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
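# Hand-checked example: for the 5-vertex adjacency matrix below, the
# backtracking search above finds the Hamiltonian cycle [0, 1, 2, 4, 3, 0].
# >>> graph = [[0, 1, 0, 1, 0],
# ...          [1, 0, 1, 1, 1],
# ...          [0, 1, 0, 0, 1],
# ...          [1, 1, 0, 0, 1],
# ...          [0, 1, 1, 1, 0]]
# >>> hamilton_cycle(graph)
# [0, 1, 2, 4, 3, 0]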
| 366
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
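    # Fixture walk-through (hand-traced): with the merges above, " lower"
    # byte-encodes to ("\u0120", "l", "o", "w", "e", "r") and merges step by
    # step into ["\u0120low", "er"], so this tiny vocab can tokenize the
    # "lower newer" sample returned by get_input_output_texts.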
    def lowerCAmelCase ( self , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def lowerCAmelCase ( self , **kwargs ) -> str:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='pt' )
            self.assertIn('input_ids' , batch )
            self.assertIn('attention_mask' , batch )
            self.assertNotIn('labels' , batch )
            self.assertNotIn('decoder_attention_mask' , batch )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'] , padding=True , truncation=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='pt' )
            targets = tokenizer(text_target=tgt_text , return_tensors='pt' )
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output['global_attention_mask'] = [[0] * len(x ) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['global_attention_mask'] , expected_global_attention_mask )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 295
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) -> None:
    '''simple docstring'''
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model ) -> None:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''Load one fairseq conv-layer tensor into the HF feature extractor.'''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the HF design.'''
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config )
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
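    # Example invocation (illustrative only; the script name and all paths
    # below are hypothetical placeholders, not values from this repo):
    #   python convert_unispeech_sat_checkpoint.py \
    #       --checkpoint_path /path/to/unispeech_sat.pt \
    #       --pytorch_dump_folder_path ./unispeech-sat-hf \
    #       --dict_path /path/to/dict.ltr.txt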
| 367
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ) -> None:
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ) -> None:
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_basic_tokenizer_splits_on_punctuation( self ) -> None:
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text ) , expected )
    def test_wordpiece_tokenizer( self ) -> None:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
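        # WordPiece is greedy longest-prefix-first: 'unwanted' becomes
        # 'un' + '##want' + '##ed' because 'un' is the longest vocab prefix,
        # while 'unwantedX' cannot be fully matched against the vocab, so the
        # whole word collapses to '[UNK]'.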
    def test_is_whitespace( self ) -> None:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ) -> None:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ) -> None:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text( self ) -> None:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
    def test_sequence_builders( self ) -> None:
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ) -> None:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars( self ) -> None:
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 295
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class UpperCamelCase_ ( PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs )
| 368
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 295
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( PretrainedConfig ):
    """Configuration class for FNet models."""
    model_type = '''fnet'''
    def __init__( self , vocab_size=3_2000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 369
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True ):
    '''Build `parameterized` test cases for the datasets mirrored on HF GCP.'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCamelCase_ ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ) -> None:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    '''Download an already-prepared dataset from the HF GCS mirror.'''
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepared_path = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path ):
    '''Stream a dataset straight from the HF GCS mirror.'''
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
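# The mirrored metadata lives at a deterministic URL of the form (a sketch):
#   {HF_GCP_BASE_URL}/{builder._relative_data_dir(with_hash=False)}/{config.DATASET_INFO_FILENAME}
# which is exactly what the parameterized test above reconstructs and fetches.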
| 295
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
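# What the `_LazyModule` indirection buys (a sketch of assumed usage, not part
# of this file): importing the package stays cheap because the heavy torch/tf/
# flax submodules are only imported on first attribute access, e.g.
#   from transformers import ResNetModel  # the torch-backed module loads here, lazily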
| 370
|
def one_pence() -> int:
    '''There is exactly one way to give one pence.'''
    return 1
def two_pence(x: int ) -> int:
    '''Count combinations of {1p, 2p} coins summing to `x`.'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ) -> int:
    '''Count combinations of coins up to 5p summing to `x`.'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ) -> int:
    '''Count combinations of coins up to 10p summing to `x`.'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ) -> int:
    '''Count combinations of coins up to 20p summing to `x`.'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ) -> int:
    '''Count combinations of coins up to 50p summing to `x`.'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ) -> int:
    '''Count combinations of coins up to 1 pound summing to `x`.'''
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ) -> int:
    '''Count combinations of coins up to 2 pounds summing to `x`.'''
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ) -> int:
    '''Project Euler 31: number of ways to make `x` pence from UK coins.'''
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip())))
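    # Small sanity check (illustrative): five_pence(5) counts the 4 ways to
    # form 5p from {1p, 2p, 5p}: {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}.
    assert five_pence(5) == 4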
| 295
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase_ = """true"""
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ):
    '''Return a model, its prepared DDP copy, and a prepared dataloader.'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator , use_longest=False ):
    '''Build an MRPC validation dataloader tokenized with a shared tokenizer.'''
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    '''Prepare baseline ("no") and distributed ("ddp") variants of an MRPC classifier.'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    '''Run inference and gather logits/targets across all processes.'''
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    '''Check that gather_for_metrics returns exactly `num_samples` rows.'''
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc(dispatch_batches=False , split_batches=False ):
    '''Baseline and distributed metric computation must agree exactly.'''
    metric = evaluate.load('glue' , 'mrpc' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup["""no"""]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["""labels"""]
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    '''Exercise gather_for_metrics across batching configurations.'''
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
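# Why gather_for_metrics matters here (illustrative numbers): with 82 samples
# across 8 processes each rank is padded up to 11 items, so a plain gather
# would return 88 rows; gather_for_metrics drops the 6 duplicated padding
# samples and returns exactly 82, which is what test_torch_metrics asserts.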
| 371
|
def binomial_coefficient(n: int , r: int ) -> int:
    '''Compute C(n, r) via a space-optimized Pascal's triangle.'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
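# Worked check (illustrative): C(10, 5) = 10! / (5! * 5!) = 252, the value the
# row-by-row update above converges to after processing row n = 10.
assert binomial_coefficient(n=10, r=5) == 252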
| 295
| 0
|
from __future__ import annotations
def is_palindrome(n: int | str ) -> bool:
    '''Return True if `n` reads the same forwards and backwards.'''
    n = str(n )
    return n == n[::-1]
def solution(n: int = 1_000_000 ) -> int:
    '''Project Euler 36: sum numbers below `n` palindromic in base 10 and base 2.'''
    total = 0
    for i in range(1 , n ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
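    # 585 is the classic double-base palindrome: it reads the same in decimal,
    # and its binary form 0b1001001001 is palindromic too.
    assert is_palindrome(585) and is_palindrome(bin(585).split('b')[1])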
| 350
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
    def encoder_decoder_mask( self , query_input , key_input ) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
        batch , _ , _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class DecoderLayer( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaLayerCrossAttention( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class TaLayerFFCond( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> torch.Tensor:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> torch.Tensor:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input: torch.Tensor ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> torch.Tensor:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
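# Minimal shape check for the FiLM layer above (a sketch; the 4x width matches
# how the conditioning embedding is built in the decoder's time-MLP):
#   film = TaFiLMLayer(in_features=512 * 4, out_features=512)
#   x, cond = torch.randn(1, 10, 512), torch.randn(1, 1, 512 * 4)
#   assert film(x, cond).shape == x.shape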
| 295
| 0
|
import torch
from torch import nn
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ) -> None:
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ) -> torch.Tensor:
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ) -> torch.Tensor:
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ) -> torch.Tensor:
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
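# Intuition for the cluster bookkeeping above (illustrative numbers): with
# cutoffs=[2000, 10000] and n_token=30000 there are 2 tail clusters, so the
# head softmax has 2000 + 2 entries (shortlist words plus one slot per tail
# cluster), and a tail word's log-probability decomposes as
# log P(cluster | h) + log P(word | cluster, h).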
| 351
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( PretrainedConfig ):
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=5_0257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ) -> list:
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input , dimension , size , step ):
    '''Custom torch.Tensor.unfold implementation to enable ONNX export.'''
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    '''Largest divisor of `seq_length` below `window_size`, plus the
    resulting number of blocks; written this way to enable ONNX export.'''
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
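# Illustrative behaviour of the helpers above: for x = torch.arange(8).view(1, 8),
# custom_unfold(x, dimension=1, size=2, step=2) has shape (1, 4, 2) with windows
# [[0, 1], [2, 3], [4, 5], [6, 7]]; custom_get_block_length_and_num_blocks(8, 4)
# returns (2, 4), since the divisor candidates only run over 1..window_size-1,
# so window_size itself is never considered.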
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
# We need to order the input in the way they appears in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
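    # Shape note (illustrative, read off the tuple built above): each dummy past
    # key/value tensor is (batch, num_attention_heads, seqlen + 2,
    # hidden_size // num_attention_heads); e.g. batch=2, 16 heads, seqlen=7 and
    # hidden size 2048 gives torch.zeros(2, 16, 9, 128).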
@property
def lowerCAmelCase ( self ) -> int:
return 13
| 295
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> bool:
'''simple docstring'''
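    # 1) Construct the failure array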
_snake_case = get_failure_array(UpperCamelCase__ )
# 2) Step through text searching for pattern
_snake_case , _snake_case = 0, 0 # index into text, pattern
while i < len(UpperCamelCase__ ):
if pattern[j] == text[i]:
if j == (len(UpperCamelCase__ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_snake_case = failure[j - 1]
continue
i += 1
return False
def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> list[int]:
'''simple docstring'''
_snake_case = [0]
_snake_case = 0
_snake_case = 1
while j < len(UpperCamelCase__ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_snake_case = failure[i - 1]
continue
j += 1
failure.append(UpperCamelCase__ )
return failure
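# Worked example (matches Test 5 below): for the pattern "aabaabaaa" the
# failure array is [0, 1, 0, 1, 2, 3, 4, 5, 2]; failure[5] == 3 because "aab"
# is the longest proper prefix of "aabaab" that is also its suffix, so a
# mismatch after matching 6 characters can safely resume from index 3.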
if __name__ == "__main__":
# Test 1)
UpperCAmelCase_ = """abc1abc12"""
UpperCAmelCase_ = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
UpperCAmelCase_ = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase_ = """ABABX"""
UpperCAmelCase_ = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase_ = """AAAB"""
UpperCAmelCase_ = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase_ = """abcdabcy"""
UpperCAmelCase_ = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase_ = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 352
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
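# Equivalent vectorized form (a sketch; hypothetical name): NumPy broadcasting
# replaces the per-pixel loops above and is much faster on large images.
def convert_to_negative_vectorized(img):
    return 255 - img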
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 295
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ = logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( snake_case_ ):
lowerCAmelCase_ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **lowerCAmelCase_ ) -> Optional[int]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_snake_case = deprecated_arg[3:]
_snake_case = not kwargs.pop(lowerCAmelCase_ )
logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
_snake_case = kwargs.pop('tpu_name' , self.tpu_name )
_snake_case = kwargs.pop('device_idx' , self.device_idx )
_snake_case = kwargs.pop('eager_mode' , self.eager_mode )
_snake_case = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**lowerCAmelCase_ )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Name of TPU'''} , )
lowerCAmelCase_ = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    lowerCAmelCase_ = field(default=snake_case_ , metadata={'''help''': '''Benchmark models in eager mode.'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'''
} , )
@cached_property
def lowerCAmelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
_snake_case = None
if self.tpu:
try:
if self.tpu_name:
_snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_snake_case = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_snake_case = None
return tpu
@cached_property
def lowerCAmelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_snake_case = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
_snake_case = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
_snake_case = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def lowerCAmelCase ( self ) -> bool:
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def lowerCAmelCase ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def lowerCAmelCase ( self ) -> Tuple:
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def lowerCAmelCase ( self ) -> int:
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCAmelCase ( self ) -> bool:
return self.n_gpu > 0
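# Usage sketch (hypothetical values; TensorFlowBenchmarkArguments is the
# upstream name of this dataclass, and the fields follow the BenchmarkArguments
# base class it extends):
#
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   with args.strategy.scope():
#       ...  # build and time the model under the selected TPU/GPU/CPU strategy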
| 353
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
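# Example (applying the rules above in order): the timm key
# "blocks.0.attn.proj.weight" first becomes
# "videomae.encoder.layer.0.attn.proj.weight" and then
# "videomae.encoder.layer.0.attention.output.dense.weight".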
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295
| 0
|
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if n_term == "":
return []
_snake_case = []
    for temp in range(int(n_term ) ):
series.append(F'''1/{temp + 1}''' if series else '1' )
return series
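# Worked example (follows the loop above): harmonic_series("5") returns
# ['1', '1/2', '1/3', '1/4', '1/5'].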
if __name__ == "__main__":
UpperCAmelCase_ = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 354
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
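# Bottleneck note (illustrative): with the default reduction of 4, a block with
# out_channels=256 squeezes to 256 // 4 = 64 channels inside its 1x1/3x3 convs
# before the final 1x1 expands back to 256, the classic ResNet-50 bottleneck.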
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 295
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class UpperCamelCase_ ( lowerCAmelCase__ ):
lowerCAmelCase_ = "table-transformer"
lowerCAmelCase_ = ["past_key_values"]
lowerCAmelCase_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=100 , lowerCAmelCase_=6 , lowerCAmelCase_=2048 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=2048 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=256 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1.0 , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_="resnet50" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = backbone_config.get('model_type' )
_snake_case = CONFIG_MAPPING[backbone_model_type]
_snake_case = config_class.from_dict(_SCREAMING_SNAKE_CASE )
# set timm attributes to None
_snake_case , _snake_case , _snake_case = None, None, None
_snake_case = use_timm_backbone
_snake_case = backbone_config
_snake_case = num_channels
_snake_case = num_queries
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = init_xavier_std
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = encoder_layers
_snake_case = auxiliary_loss
_snake_case = position_embedding_type
_snake_case = backbone
_snake_case = use_pretrained_backbone
_snake_case = dilation
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = mask_loss_coefficient
_snake_case = dice_loss_coefficient
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase ( self ) -> int:
return self.d_model
class UpperCamelCase_ ( lowerCAmelCase__ ):
lowerCAmelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def lowerCAmelCase ( self ) -> int:
return 12
| 355
|
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool:
'''simple docstring'''
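    # 1. Validate that path exists between current and next vertices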
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]:
'''simple docstring'''
_snake_case = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
    # evaluate; if we find an answer, return the path, otherwise return an empty array
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
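# Usage sketch (hamilton_cycle is the upstream name of the entry point above):
#
#   graph = [[0, 1, 0, 1, 0],
#            [1, 0, 1, 1, 1],
#            [0, 1, 0, 0, 1],
#            [1, 1, 0, 0, 1],
#            [0, 1, 1, 1, 0]]
#   hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0], a cycle back to vertex 0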
| 295
| 0
|
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> Optional[int]:
'''simple docstring'''
_snake_case = abs(UpperCamelCase__ )
_snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> str:
'''simple docstring'''
_snake_case = abs(UpperCamelCase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> Any:
'''simple docstring'''
return sum(int(UpperCamelCase__ ) for c in str(abs(UpperCamelCase__ ) ) )
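# Worked example (the first value the benchmark below uses): 262144 ->
# 2 + 6 + 2 + 1 + 4 + 4 = 19, and all three variants above agree on it.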
def lowerCamelCase__ ( ) -> Any:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase__ : Callable , UpperCamelCase__ : int ) -> None:
_snake_case = F'''{func.__name__}({value})'''
_snake_case = timeit(F'''__main__.{call}''' , setup='import __main__' )
print(F'''{call:56} = {func(UpperCamelCase__ )} -- {timing:.4f} seconds''' )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 356
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 295
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCamelCase_ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ):
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> List[str]:
super().__init__(features=lowerCamelCase__ )
_snake_case = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and column:
if all(
isinstance(lowerCamelCase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCamelCase__ )
return column
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
import torch
if isinstance(lowerCamelCase__ , (str, bytes, type(lowerCamelCase__ )) ):
return value
elif isinstance(lowerCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_snake_case = {}
if isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
_snake_case = {'''dtype''': torch.intaa}
elif isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_snake_case = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
_snake_case = np.asarray(lowerCamelCase__ )
return torch.tensor(lowerCamelCase__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase__ , '__array__' ) and not isinstance(lowerCamelCase__ , torch.Tensor ):
_snake_case = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase__ , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase__ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return map_nested(self._recursive_tensorize , lowerCamelCase__ , map_list=lowerCamelCase__ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Mapping:
_snake_case = self.numpy_arrow_extractor().extract_row(lowerCamelCase__ )
_snake_case = self.python_features_decoder.decode_row(lowerCamelCase__ )
return self.recursive_tensorize(lowerCamelCase__ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> "torch.Tensor":
_snake_case = self.numpy_arrow_extractor().extract_column(lowerCamelCase__ )
_snake_case = self.python_features_decoder.decode_column(lowerCamelCase__ , pa_table.column_names[0] )
_snake_case = self.recursive_tensorize(lowerCamelCase__ )
_snake_case = self._consolidate(lowerCamelCase__ )
return column
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Mapping:
_snake_case = self.numpy_arrow_extractor().extract_batch(lowerCamelCase__ )
_snake_case = self.python_features_decoder.decode_batch(lowerCamelCase__ )
_snake_case = self.recursive_tensorize(lowerCamelCase__ )
for column_name in batch:
_snake_case = self._consolidate(batch[column_name] )
return batch
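# Standalone sketch of the dtype policy the tensorize step above implements
# (an illustrative re-implementation, not the datasets API itself): integer
# NumPy inputs default to torch.int64 and floating inputs to torch.float32,
# everything else keeps torch's own dtype inference.
def _default_dtype_tensorize(value):
    import numpy as np
    import torch

    if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
        return torch.tensor(value, dtype=torch.int64)
    if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
        return torch.tensor(value, dtype=torch.float32)
    return torch.tensor(value)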
| 357
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=False ) -> Optional[int]:
'''simple docstring'''
_snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case = ''
else:
_snake_case = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_snake_case = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[
: config.hidden_size, :
]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = in_proj_bias[-config.hidden_size :]
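# Split note (illustrative): timm fuses query, key and value into one
# projection of shape (3 * hidden_size, hidden_size); the slices above peel
# off the query rows ([:hidden_size]), key rows ([hidden_size : 2 * hidden_size])
# and value rows ([-hidden_size:]), together with the matching bias thirds.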
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = dct.pop(A__ )
_snake_case = val
def lowerCamelCase__ ( ) -> Any:
'''simple docstring'''
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = DeiTConfig()
# all deit models have fine-tuned heads
_snake_case = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_snake_case = 1_000
_snake_case = 'huggingface/label-files'
_snake_case = 'imagenet-1k-id2label.json'
_snake_case = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(A__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
_snake_case = int(deit_name[-6:-4] )
_snake_case = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_snake_case = 192
_snake_case = 768
_snake_case = 12
_snake_case = 3
elif deit_name[9:].startswith('small' ):
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
# load original model from timm
_snake_case = timm.create_model(A__ , pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case = timm_model.state_dict()
_snake_case = create_rename_keys(A__ , A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , A__ )
# load HuggingFace model
_snake_case = DeiTForImageClassificationWithTeacher(A__ ).eval()
model.load_state_dict(A__ )
# Check outputs on an image, prepared by DeiTImageProcessor
_snake_case = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_snake_case = DeiTImageProcessor(size=A__ , crop_size=config.image_size )
_snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(A__ )
_snake_case = timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__ , outputs.logits , atol=1e-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 358
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
| 295
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=36 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=6 , lowerCAmelCase_=6 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , lowerCAmelCase_=1000 , ) -> Tuple:
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = patch_size
_snake_case = text_seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = coordinate_size
_snake_case = shape_size
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
_snake_case = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_snake_case = text_seq_length
_snake_case = (image_size // patch_size) ** 2 + 1
_snake_case = self.text_seq_length + self.image_seq_length
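        # e.g. with image_size=224 and patch_size=16: (224 // 16) ** 2 + 1 = 197 image tokens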
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that each bbox is legal, i.e. coordinates are ordered so that x0 <= x1 and y0 <= y1
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.text_seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_snake_case = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_snake_case = LayoutLMvaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# text + image
_snake_case = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
_snake_case = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_snake_case = model(pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = self.num_labels
_snake_case = LayoutLMvaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.num_labels
_snake_case = LayoutLMvaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def lowerCAmelCase ( self ) -> Dict:
_snake_case = LayoutLMvaModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
_snake_case = copy.deepcopy(lowerCAmelCase_ )
if model_class in get_values(lowerCAmelCase_ ):
_snake_case = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_snake_case = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in get_values(lowerCAmelCase_ ):
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
_snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase_ , )
return inputs_dict
def lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LayoutLMvaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(lowerCAmelCase_ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).pixel_values.to(lowerCAmelCase_ )
_snake_case = torch.tensor([[1, 2]] )
_snake_case = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_snake_case = model(
input_ids=input_ids.to(lowerCAmelCase_ ) , bbox=bbox.to(lowerCAmelCase_ ) , pixel_values=pixel_values.to(lowerCAmelCase_ ) , )
# verify the logits
_snake_case = torch.Size((1, 199, 768) )
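        # sequence length 199 = 2 text tokens + 197 image tokens ((224 // 16) ** 2 patches + 1 CLS)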
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 359
|
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
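    # Kadane's algorithm: at each element, either extend the best subarray ending at
    # the previous element or start a new subarray at the current element.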
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 0
|
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( _lowercase ):
lowerCAmelCase_ = '''facebook/bart-large-mnli'''
lowerCAmelCase_ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
lowerCAmelCase_ = '''text_classifier'''
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSequenceClassification
lowerCAmelCase_ = ['''text''', ['''text''']]
lowerCAmelCase_ = ['''text''']
def lowerCAmelCase ( self ) -> Any:
super().setup()
_snake_case = self.model.config
_snake_case = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
_snake_case = int(__UpperCamelCase )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = labels
return self.pre_processor(
[text] * len(__UpperCamelCase ) , [F'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = outputs.logits
_snake_case = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
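# Usage sketch (assuming the agents `load_tool` API; inputs are illustrative):
#
#   from transformers import load_tool
#   classifier = load_tool("text-classifier")
#   label = classifier("This movie was great!", labels=["positive", "negative"])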
| 360
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
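            # "hole" strategy: keep only the most recent prompt tokens so that the
            # prompt plus the requested new tokens still fits in the model's max length.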
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds'
                        ' the model\'s max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
| 295
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase_ ( __lowercase ):
def __init__( self ) -> Tuple:
# test for the above condition
self.test()
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = 0
_snake_case = False
while not completed:
if counter == 1:
self.reset()
_snake_case = self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
_snake_case , _snake_case , _snake_case = self.update(UpperCAmelCase__ )
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def lowerCAmelCase ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCAmelCase ( self ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCAmelCase ( self ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCAmelCase ( self , lowerCAmelCase_=False ) -> Union[str, Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase_ ( __lowercase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
_snake_case = token_ids
_snake_case = len(self.token_ids )
_snake_case = -1 # the index of the currently fulfilled step
_snake_case = False
def lowerCAmelCase ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
_snake_case = True
if self.fulfilled_idx == (self.seqlen - 1):
_snake_case = True
_snake_case = completed
else:
# failed to make progress.
_snake_case = True
self.reset()
return stepped, completed, reset
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = False
_snake_case = 0
def lowerCAmelCase ( self ) -> List[str]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCAmelCase ( self , lowerCAmelCase_=False ) -> Optional[int]:
_snake_case = PhrasalConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.fulfilled_idx
_snake_case = self.completed
return new_constraint
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=True ) -> str:
_snake_case = max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
_snake_case = {}
for token_ids in nested_token_ids:
_snake_case = root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
_snake_case = {}
_snake_case = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
_snake_case = root
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = self.trie
for current_token in current_seq:
_snake_case = start[current_token]
_snake_case = list(start.keys() )
return next_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
_snake_case = self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class UpperCamelCase_ ( __lowercase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
_snake_case = DisjunctiveTrie(UpperCAmelCase__ )
_snake_case = nested_token_ids
_snake_case = self.trie.max_height
_snake_case = []
_snake_case = False
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
_snake_case = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
_snake_case = True
else:
_snake_case = True
self.reset()
_snake_case = self.trie.reached_leaf(self.current_seq )
_snake_case = completed
return stepped, completed, reset
def lowerCAmelCase ( self ) -> int:
_snake_case = False
_snake_case = []
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCAmelCase ( self , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = DisjunctiveConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.current_seq
_snake_case = self.completed
return new_constraint
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ ) -> str:
_snake_case = constraints
# max # of steps required to fulfill a given constraint
_snake_case = max([c.seqlen for c in constraints] )
_snake_case = len(UpperCAmelCase__ )
_snake_case = False
self.init_state()
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = []
_snake_case = None
_snake_case = [constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def lowerCAmelCase ( self ) -> Any:
_snake_case = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCAmelCase ( self ) -> int:
_snake_case = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case = constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
_snake_case = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_snake_case , _snake_case = self.add(UpperCAmelCase__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
_snake_case , _snake_case = False, False
if self.completed:
_snake_case = True
_snake_case = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_snake_case , _snake_case , _snake_case = self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
_snake_case = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_snake_case = None
if len(self.pending_constraints ) == 0:
# we're done!
_snake_case = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
_snake_case , _snake_case , _snake_case = pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
_snake_case = None
if not complete and stepped:
_snake_case = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_snake_case = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_snake_case = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCAmelCase ( self , lowerCAmelCase_=True ) -> Optional[int]:
        _snake_case = ConstraintListState(self.constraints )  # we actually never touch the self.constraints objects
        # throughout this process, so they are still in their initialization state.
if stateful:
_snake_case = [
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_snake_case = self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
_snake_case = [constraint.copy() for constraint in self.pending_constraints]
return new_state
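# Usage sketch: these constraint classes back constrained beam search in `generate`,
# roughly as follows (model/tokenizer names are illustrative):
#
#   from transformers import PhrasalConstraint
#   constraint = PhrasalConstraint(tokenizer("rainy day", add_special_tokens=False).input_ids)
#   outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)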
| 361
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ )
}
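# A minimal from-scratch sketch of the sentence-level GLEU described above, for
# illustration only (the metric itself defers to nltk's corpus-level gleu_score):
def _gleu_sentence_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens):
        counts = Counter()
        for n in range(min_len, max_len + 1):
            for i in range(len(tokens) - n + 1):
                counts[tuple(tokens[i : i + n])] += 1
        return counts

    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    matches = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = matches / max(sum(hyp_counts.values()), 1)
    recall = matches / max(sum(ref_counts.values()), 1)
    return min(precision, recall)  # GLEU is the minimum of precision and recall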
| 295
| 0
|
"""simple docstring"""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase_ ( UpperCamelCase_ ):
lowerCAmelCase_ = """openai/whisper-base"""
lowerCAmelCase_ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowerCAmelCase_ = """transcriber"""
lowerCAmelCase_ = WhisperProcessor
lowerCAmelCase_ = WhisperForConditionalGeneration
lowerCAmelCase_ = ["""audio"""]
lowerCAmelCase_ = ["""text"""]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.pre_processor(_a , return_tensors='pt' ).input_features
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.model.generate(inputs=_a )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
return self.pre_processor.batch_decode(_a , skip_special_tokens=_a )[0]
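# Usage sketch (assuming the agents `load_tool` API; `audio` is a raw waveform array):
#
#   from transformers import load_tool
#   transcriber = load_tool("transcriber")
#   text = transcriber(audio)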
| 362
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
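    # Maps every byte value (0-255) to a printable unicode character: visible ASCII and
    # Latin-1 bytes map to themselves, the rest are shifted past 255, so byte-level BPE
    # can operate on strings with no whitespace or control characters.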
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
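        # Byte-pair encoding: repeatedly merge the adjacent symbol pair with the
        # lowest (best) merge rank until no learned merge applies to the word.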
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
| 295
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 363
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_snake_case = ['key_proj', 'value_proj', 'query_proj']
_snake_case = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
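The `special_keys` branch in the loop above undoes weight fusion: old checkpoints store query, key and value as a single `in_proj_weight` of shape `(3 * embed_dim, embed_dim)` plus a fused bias, and the converter slices out thirds for the separate `q_proj`/`k_proj`/`v_proj` modules. A minimal sketch of that split on random tensors, with an illustrative size:

import torch
from torch import nn

embed_dim = 4  # illustrative; real checkpoints use the model's hidden size
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused q/k/v weight
in_proj_bias = torch.randn(3 * embed_dim)                # fused q/k/v bias

q_weight = nn.Parameter(in_proj_weight[:embed_dim, :])
k_weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v_weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
q_bias = nn.Parameter(in_proj_bias[:embed_dim])

assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)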
| 295
| 0
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCAmelCase_ = ['small', 'medium', 'large']
UpperCAmelCase_ = 'lm_head.decoder.weight'
UpperCAmelCase_ = 'lm_head.weight'
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
_snake_case = torch.load(__snake_case )
_snake_case = d.pop(__snake_case )
os.makedirs(__snake_case , exist_ok=__snake_case )
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
UpperCAmelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCAmelCase_ = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
UpperCAmelCase_ = F"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
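The pickled DialoGPT checkpoints store the language-model head under `lm_head.decoder.weight`, while the converted model expects `lm_head.weight`; the `OLD_KEY`/`NEW_KEY` constants above encode exactly that rename, even though the obfuscated body drops the re-assignment. A sketch of the intended conversion, reconstructed from those constants (the re-insertion under the new key is an assumption):

import os

import torch

def convert_checkpoint_sketch(checkpoint_path, dump_folder, weights_name="pytorch_model.bin"):
    state_dict = torch.load(checkpoint_path)
    # move the LM head weight to the key the new architecture expects
    state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
    os.makedirs(dump_folder, exist_ok=True)
    torch.save(state_dict, os.path.join(dump_folder, weights_name))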
| 364
|
import random
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict:
'''simple docstring'''
_snake_case = {i: [] for i in range(UpperCamelCase__ )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase__ )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is less than the given probability
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if random.random() < probability:
graph[i].append(UpperCamelCase__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(UpperCamelCase__ )
return graph
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
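Each candidate edge (i, j) with i < j is kept independently with the given probability, so the generator interpolates between an edgeless graph at probability 0 and the complete graph at probability 1 (an Erdős–Rényi-style construction). A quick usage example, using `random_graph` and `complete_graph` as assumed de-obfuscated names for the two functions above:

import random

random.seed(0)  # make the example reproducible
print(random_graph(5, 0.5))   # adjacency lists such as {0: [3], 1: [2, 4], ...}
print(complete_graph(3))      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}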
| 295
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( __snake_case ):
lowerCAmelCase_ = "longformer"
def __init__( self , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3_0522 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 3072 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 1E-12 , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> str:
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class UpperCamelCase_ ( __snake_case ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = "default" , lowerCAmelCase_ = None ) -> Any:
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_snake_case = True
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: """batch"""}
return outputs
@property
def lowerCAmelCase ( self ) -> float:
return 1E-4
@property
def lowerCAmelCase ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_snake_case = 1
return inputs
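The tail of the dummy-input method above zeroes out a tensor shaped like `input_ids` and then, per the "make every second token global" comment, flips alternating positions to 1; the obfuscated assignment hides the indexing, so the `[:, ::2]` slice below is an assumption consistent with that comment:

import torch

input_ids = torch.ones(1, 8, dtype=torch.long)   # stand-in dummy input
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                # every second token attends globally
print(global_attention_mask)                     # tensor([[1, 0, 1, 0, 1, 0, 1, 0]])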
| 365
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 295
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCamelCase_ :
lowerCAmelCase_ = LEDConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = """gelu"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=20 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=4 , ) -> Optional[int]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
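        # Worked example with illustrative numbers: seq_length = 7 and
        # attention_window = 4 give a padding term of (4 - 7 % 4) % 4 = 1,
        # so encoder_seq_length becomes 7 + 1 = 8, the next multiple of the window;
        # sequences already aligned to the window get a padding term of 0.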
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(_lowercase , _lowercase , _lowercase )
_snake_case = tf.concat(
[tf.zeros_like(_lowercase )[:, :-1], tf.ones_like(_lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = TFLEDModel(config=_lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
_snake_case , _snake_case = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(_lowercase , attention_mask=_lowercase )[0]
_snake_case = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-3 )
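The cache check above follows a generic pattern: run the decoder once over the full extended sequence without a cache, run it again feeding only the appended tokens plus `past_key_values`, and compare a random slice of the overlapping output positions. A framework-agnostic sketch of just the comparison step (names and shapes are illustrative):

import numpy as np

def assert_cache_matches(full_logits, cached_logits, tol=1e-3):
    # full_logits: (batch, full_len, dim) from the uncached pass;
    # cached_logits: (batch, new_len, dim) from the pass that reused the cache
    new_len = cached_logits.shape[1]
    np.testing.assert_allclose(full_logits[:, -new_len:, :], cached_logits, rtol=tol, atol=tol)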
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> Any:
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowercase )
def lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
def lowerCAmelCase ( self ) -> Dict:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCAmelCase_ ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCAmelCase_ ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(_lowercase )
_snake_case = model(self._prepare_for_class(_lowercase , _lowercase ) )
_snake_case = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(_lowercase )
_snake_case = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(_lowercase )
_snake_case = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowercase )
_snake_case = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def lowerCAmelCase ( self ) -> Optional[Any]:
        # TODO: Head-masking is not yet implemented
pass
def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> str:
'''simple docstring'''
return tf.constant(SCREAMING_SNAKE_CASE_ , dtype=tf.intaa )
UpperCAmelCase_ = 1E-4
@slow
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self ) -> str:
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_snake_case = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
_snake_case = model(**_lowercase )[0]
_snake_case = (1, 1024, 768)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_snake_case = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
_snake_case = model(**_lowercase )[0]
_snake_case = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 , rtol=1E-3 )
| 366
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
            _snake_case = [[0] * len(x) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
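The `setUp` above writes a byte-level BPE vocabulary in which `\u0120` (rendered `Ġ`) marks a leading space, plus a merges file whose lines are ranked pairs. A compact sketch of the lowest-rank merge loop those files drive, simplified relative to the real tokenizer:

def bpe_sketch(token, merges):
    # rank each mergeable pair; a lower index in the merges file wins first
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    symbols = list(token)
    while len(symbols) > 1:
        pairs = [(ranks.get(p, float("inf")), i) for i, p in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)  # ties break leftmost, matching BPE convention
        if best_rank == float("inf"):
            break  # no applicable merge left
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]
    return symbols

merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r"]
print(bpe_sketch("\u0120lower", merges))  # ['Ġlow', 'er']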
| 295
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
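The `WordpieceTokenizer` test earlier in this file pins down greedy longest-match-first segmentation: each whitespace-split word is consumed by repeatedly taking the longest vocabulary prefix, non-initial pieces carry a `##` prefix, and a word with no matching prefix collapses to the unknown token. A compact sketch of that per-word algorithm, simplified from the real tokenizer:

def wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:  # shrink the candidate until it is in the vocab
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate
            if candidate in vocab:
                current = candidate
                break
            end -= 1
        if current is None:
            return [unk_token]  # the whole word becomes unknown
        pieces.append(current)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece_sketch("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece_sketch("unwantedX", vocab))  # ['[UNK]']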
| 295
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
UpperCAmelCase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Generates the next generation for a given state of Conway's Game of Life."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
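
# Hand-verified sanity check (a sketch, not part of the original module): one
# application of new_generation flips the vertical blinker defined above into
# a horizontal bar, and a second application flips it back.
#
#   >>> new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
#   [[0, 0, 0], [1, 1, 1], [0, 0, 0]]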
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life states."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 368
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 295
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCamelCase_ ( _lowerCamelCase ):
"""simple docstring"""
lowerCAmelCase_ = '''megatron-bert'''
def __init__( self , lowerCAmelCase_=2_9056 , lowerCAmelCase_=1024 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=4096 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=0 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> str:
        super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = position_embedding_type
_snake_case = use_cache
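
# Minimal usage sketch (hypothetical, not from this file): in transformers this
# config class is exposed as MegatronBertConfig, and the usual pattern is to
# build a config and feed it to the matching model class:
#
#   from transformers import MegatronBertConfig, MegatronBertModel
#   configuration = MegatronBertConfig()        # 29056-token vocab, 24 layers by default
#   model = MegatronBertModel(configuration)    # randomly initialised weights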
| 369
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build the parameterized.named_parameters entries for the datasets listed above."""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 295
| 0
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive shortest job first)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process. The shortest process in ready_process, target_process,
    # is then executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
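
# Worked example (a hand-checked sketch, not part of the original file): with
# arrival_time = [0, 0, 0, 0] and burst_time = [2, 5, 3, 7], jobs run in
# shortest-first order P1, P3, P2, P4 and finish at t = 2, 5, 10, 17. Waiting
# time is completion - arrival - burst, so
# calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10].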
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
_lowercase = 4
_lowercase = [2, 5, 3, 7]
_lowercase = [0, 0, 0, 0]
_lowercase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowercase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 370
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Count the ways n pence can be made from the eight standard UK coins."""
    return two_pound(n)
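
# Quick checks (hand-verified sketch): solution(2) == 2 (two 1p coins, or one
# 2p coin), and for Project Euler problem 31 the full answer is
# solution(200) == 73682.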
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ['''image_processor''']
lowerCAmelCase_ = '''SamImageProcessor'''
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(_SCREAMING_SNAKE_CASE )
_snake_case = self.image_processor
_snake_case = -10
_snake_case = self.image_processor.size['''longest_edge''']
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchEncoding:
_snake_case = self.image_processor(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        # pop arguments that are not used in the forward but used nevertheless
_snake_case = encoding_image_processor['''original_sizes''']
if hasattr(_SCREAMING_SNAKE_CASE , 'numpy' ): # Checks if Torch or TF tensor
_snake_case = original_sizes.numpy()
_snake_case = self._check_and_preprocess_points(
input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , input_boxes=_SCREAMING_SNAKE_CASE , )
_snake_case = self._normalize_and_convert(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , )
return encoding_image_processor
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="pt" , ) -> Dict:
if input_points is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
_snake_case = [
self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , original_sizes[0] ) for point in input_points
]
else:
_snake_case = [
self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for point, original_size in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case = self._pad_points_and_labels(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = np.array(_SCREAMING_SNAKE_CASE )
if input_labels is not None:
_snake_case = np.array(_SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
_snake_case = [
self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , original_sizes[0] , is_bounding_box=_SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
_snake_case = [
self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , is_bounding_box=_SCREAMING_SNAKE_CASE )
for box, original_size in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
_snake_case = np.array(_SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
_snake_case = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
_snake_case = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_snake_case = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_snake_case = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_snake_case = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_snake_case = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = max([point.shape[0] for point in input_points] )
_snake_case = []
for i, point in enumerate(_SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
_snake_case = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_SCREAMING_SNAKE_CASE )
_snake_case = processed_input_points
return input_points, input_labels
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> np.ndarray:
_snake_case = original_size
_snake_case = self.image_processor._get_preprocess_shape(_SCREAMING_SNAKE_CASE , longest_edge=_SCREAMING_SNAKE_CASE )
_snake_case = deepcopy(_SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
if is_bounding_box:
_snake_case = coords.reshape(-1 , 2 , 2 )
_snake_case = coords[..., 0] * (new_w / old_w)
_snake_case = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case = coords.reshape(-1 , 4 )
return coords
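
    # Illustrative numbers (a sketch, not from the original code): with
    # target_size 1024 and a 512 x 512 original image, _get_preprocess_shape
    # scales the longest edge to 1024, so a point at (256, 256) is multiplied
    # by 1024 / 512 and becomes (512, 512) in model coordinates.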
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
if input_points is not None:
if hasattr(_SCREAMING_SNAKE_CASE , 'numpy' ): # Checks for TF or Torch tensor
_snake_case = input_points.numpy().tolist()
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0] , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input points must be a list of list of floating points.' )
_snake_case = [np.array(_SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
_snake_case = None
if input_labels is not None:
if hasattr(_SCREAMING_SNAKE_CASE , 'numpy' ):
_snake_case = input_labels.numpy().tolist()
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0] , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input labels must be a list of list integers.' )
_snake_case = [np.array(_SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
_snake_case = None
if input_boxes is not None:
if hasattr(_SCREAMING_SNAKE_CASE , 'numpy' ):
_snake_case = input_boxes.numpy().tolist()
if (
not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0] , _SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0] , _SCREAMING_SNAKE_CASE )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
_snake_case = [np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case = None
return input_points, input_labels, input_boxes
@property
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(_SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]:
return self.image_processor.post_process_masks(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 371
|
def binomial_coefficient(n, r):
    """Compute C(n, r) using a single row of Pascal's triangle, in O(n * r) time."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
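
# Quick check (hand-verified sketch): C(10, 5) = 252, so the print below
# should output 252. The descending inner loop lets one row of Pascal's
# triangle be updated in place without overwriting values still needed.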
print(binomial_coefficient(n=10, r=5))
| 295
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 350
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class DecoderLayer(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaLayerCrossAttention(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class TaLayerFFCond(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
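
# In other words (a sketch of the math, not extra functionality): this layer
# computes RMSNorm, y = weight * x / sqrt(mean(x ** 2) + eps), with the mean
# taken over the last dimension in float32 before casting back to the weight
# dtype.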
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 295
| 0
|
def is_balanced(s: str) -> bool:
    """Check whether a sequence of brackets is balanced."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])

        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
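
# Doctest-style checks (a sketch, hand-verified against the logic above):
#   >>> is_balanced("([]{})")
#   True
#   >>> is_balanced("[(])")
#   False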
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 351
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
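
    # Example (a sketch): the default attention_types value
    # [[["global", "local"], 12]] expands to ["global", "local"] repeated 12
    # times, i.e. 24 entries alternating global and local attention, matching
    # the default num_layers of 24.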
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation for GPTNeo to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
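
# Worked example (hand-checked sketch): with seq_length = 2048 and
# window_size = 256, the largest divisor of 2048 below 256 is 128, so the
# function returns (128, 16): a block length of 128 and 16 such blocks.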
class UpperCamelCase_ ( _lowerCamelCase ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
@property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        return 13
| 295
| 0
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
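
# Known result (hedged sanity check): for the 1000-digit constant N above, the
# greatest product of thirteen adjacent digits is 23514624000, which is what
# the print below should report.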
if __name__ == "__main__":
print(F"{solution() = }")
| 352
|
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert each pixel's colour in the image to its negative."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
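
# Note (a sketch, not the original implementation): OpenCV images are NumPy
# arrays, so the same inversion can be written without the Python loops as a
# vectorized one-liner:
#
#   img = 255 - img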
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
| 295
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
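
# Example (a sketch): floats_list((2, 3)) returns a 2 x 3 nested Python list
# of floats drawn uniformly from [0, scale), suitable as a fake waveform or
# feature batch in the tests below.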
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(lowerCAmelCase_ ):
return list(itertools.chain(*lowerCAmelCase_ ) )
if equal_length:
_snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def lowerCAmelCase ( self ) -> int:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
_snake_case = feature_extractor(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_snake_case = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test batched
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case = np.asarray(lowerCAmelCase_ )
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ['longest', 'max_length', 'do_not_pad']
_snake_case = [None, 16, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = feature_extractor(
lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ['longest', 'max_length', 'do_not_pad']
_snake_case = [None, 16, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = feature_extractor(
lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='max_length' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='longest' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='longest' , max_length=16 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :24], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 353
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
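
# Example (traced by hand as a sanity check): the original checkpoint key
# "blocks.0.attn.proj.weight" first becomes
# "videomae.encoder.layer.0.attn.proj.weight" via the "blocks" rule, and then
# "videomae.encoder.layer.0.attention.output.dense.weight" via the
# "attn.proj" rule; no later rule matches the rewritten key.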
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
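# Illustration (added, not part of the original script): a minimal,
# self-contained sketch of the fused-qkv split performed above — a
# (3 * dim, dim) projection matrix is sliced into query/key/value along
# dim 0. The shapes are illustrative.
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)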
| 295
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self ) -> int:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=lowerCAmelCase_ , )
assert hasattr(self , 'env' )
def lowerCAmelCase ( self , lowerCAmelCase_=1 ) -> str:
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase_ , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
        TrainingJobAnalytics(lowerCAmelCase_ ).export_csv(F'''{self.env.test_path}/{lowerCAmelCase_}_metrics.csv''' )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
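# Illustration (added, not part of the original test): a hedged sketch of how
# `parameterized_class` above multiplies the test class — one subclass is
# generated per config dict, with each key set as a class attribute. The
# class and test names below are illustrative.
from parameterized import parameterized_class
import unittest

@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class FrameworkAttrTest(unittest.TestCase):
    def test_framework_attribute_is_set(self):
        self.assertIn(self.framework, {"pytorch", "tensorflow"})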
| 354
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
        _snake_case = nn.Conv2d(
            lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
        _snake_case = nn.BatchNorm2d(lowerCAmelCase_ )
        _snake_case = ACT2FN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        _snake_case = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
        _snake_case = nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
        _snake_case = nn.BatchNorm2d(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
        _snake_case = ACT2FN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
        _snake_case = ACT2FN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
        if isinstance(lowerCAmelCase_ , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(lowerCAmelCase_ , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
        _snake_case = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
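# Usage sketch (added, not part of the original module): running a pretrained
# checkpoint through the public transformers API, assuming network access to
# the Hugging Face Hub; the random tensor stands in for a processed 224x224
# RGB image.
import torch
from transformers import ResNetModel

model = ResNetModel.from_pretrained("microsoft/resnet-50")
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])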
| 295
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
UpperCAmelCase_ = 1.054571817E-34 # unit of ℏ : J * s
UpperCAmelCase_ = 3E8 # unit of c : m * s^-1
def lowerCamelCase__ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_snake_case = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_snake_case = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_snake_case = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
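# Worked example (added, not part of the original module): standalone
# evaluation of the same Casimir formula, F = ħ·c·π²·A / (240·d⁴), for
# 1 cm² plates separated by 1 µm; the input values are illustrative.
from math import pi as _pi

_force = (1.054571817e-34 * 3e8 * _pi**2 * 1e-4) / (240 * (1e-6) ** 4)
print(f"{_force:.3e} N")  # ~1.301e-07 N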
| 355
|
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool:
'''simple docstring'''
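    # 1. Validate that the current and next vertices are adjacent in the graph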
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]:
'''simple docstring'''
_snake_case = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
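# Illustration (added, not part of the original module): a brute-force check
# of the Hamiltonian cycle the backtracking search above would find on a
# small adjacency matrix (the graph below is chosen for illustration).
from itertools import permutations

graph_example = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
cycles = [
    [0, *p, 0]
    for p in permutations(range(1, len(graph_example)))
    if all(graph_example[a][b] for a, b in zip([0, *p], [*p, 0]))
]
print(cycles[0])  # [0, 1, 2, 4, 3, 0]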
| 295
| 0
|
import os
def lowerCamelCase__ ( ) -> Any:
'''simple docstring'''
_snake_case = os.path.dirname(os.path.realpath(__UpperCAmelCase ) )
_snake_case = os.path.join(__UpperCAmelCase , 'triangle.txt' )
with open(__UpperCAmelCase ) as f:
_snake_case = f.readlines()
_snake_case = []
for line in triangle:
_snake_case = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(__UpperCAmelCase ) )
a.append(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
for j in range(len(a[i] ) ):
_snake_case = a[i - 1][j] if j != len(a[i - 1] ) else 0
_snake_case = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCAmelCase , __UpperCAmelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
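# Illustration (added, not part of the original solution): the same maximum
# path-sum idea as an equivalent bottom-up pass, shown on the classic 4-row
# example triangle (expected answer: 23 = 3 + 7 + 4 + 9).
triangle_example = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for row in range(len(triangle_example) - 2, -1, -1):
    for col in range(len(triangle_example[row])):
        triangle_example[row][col] += max(
            triangle_example[row + 1][col], triangle_example[row + 1][col + 1]
        )
print(triangle_example[0][0])  # 23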
| 356
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
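# Usage sketch (added, not part of the original script): loading the
# converted pipeline back and sampling one image. The output directory and
# file names are illustrative.
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./converted-ldm")
image = pipe(num_inference_steps=50).images[0]
image.save("ldm_sample.png")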
| 295
| 0
|
from random import shuffle
import tensorflow as tf
from numpy import array
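# NOTE (added): this sample targets the legacy TF1.x graph/session API
# (tf.Session, tf.placeholder, tf.assign); it will not run under TF2 eager
# execution without the tf.compat.v1 shim.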
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_snake_case = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
_snake_case = len(vectors[0] )
# Will help select random centroids from among the available vectors
_snake_case = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_snake_case = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_snake_case = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_snake_case = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
_snake_case = tf.placeholder('float64' , [dim] )
_snake_case = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_snake_case = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_snake_case = tf.placeholder('int32' )
_snake_case = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_snake_case = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_snake_case = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
_snake_case = tf.placeholder('float' , [dim] )
_snake_case = tf.placeholder('float' , [dim] )
        _snake_case = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_snake_case = tf.placeholder('float' , [noofclusters] )
_snake_case = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        _snake_case = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_snake_case = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
_snake_case = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_snake_case = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_snake_case = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
_snake_case = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_snake_case = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_snake_case = sess.run(lowercase_ )
_snake_case = sess.run(lowercase_ )
return centroids, assignments
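# Illustration (added, not part of the original module): the same E/M loop as
# a compact NumPy sketch, without the TF session machinery; `vectors` is
# assumed to be an (n, dim) float array.
import numpy as np

def numpy_kmeans(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].copy()
    for _ in range(iterations):
        # Expectation step: assign each vector to its nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization step: move each centroid to the mean of its members
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments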
| 357
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
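# Sketch (added, not part of the original mixin): how a concrete test case
# would typically specialize the mixin above. The DownBlock2D import path
# and the class wiring are assumptions based on how diffusers block tests
# are usually structured.
import unittest
from diffusers.models.unet_2d_blocks import DownBlock2D

class DownBlockSketchTests(UpperCamelCase_, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"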
| 295
| 0
|
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( a_ ):
lowerCAmelCase_ = '''SpeechT5FeatureExtractor'''
lowerCAmelCase_ = '''SpeechT5Tokenizer'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
super().__init__(lowercase_ , lowercase_ )
def __call__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
_snake_case = kwargs.pop('audio' , lowercase_ )
_snake_case = kwargs.pop('text' , lowercase_ )
_snake_case = kwargs.pop('text_target' , lowercase_ )
_snake_case = kwargs.pop('audio_target' , lowercase_ )
_snake_case = kwargs.pop('sampling_rate' , lowercase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
_snake_case = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
elif text is not None:
_snake_case = self.tokenizer(lowercase_ , **lowercase_ )
else:
_snake_case = None
if audio_target is not None:
_snake_case = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_ )
_snake_case = targets['''input_values''']
elif text_target is not None:
_snake_case = self.tokenizer(lowercase_ , **lowercase_ )
_snake_case = targets['''input_ids''']
else:
_snake_case = None
if inputs is None:
return targets
if targets is not None:
_snake_case = labels
_snake_case = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_snake_case = decoder_attention_mask
return inputs
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
_snake_case = kwargs.pop('input_values' , lowercase_ )
_snake_case = kwargs.pop('input_ids' , lowercase_ )
_snake_case = kwargs.pop('labels' , lowercase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
_snake_case = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
elif input_ids is not None:
_snake_case = self.tokenizer.pad(lowercase_ , **lowercase_ )
else:
_snake_case = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_ ) and "input_ids" in labels[0]):
_snake_case = self.tokenizer.pad(lowercase_ , **lowercase_ )
_snake_case = targets['''input_ids''']
else:
_snake_case = self.feature_extractor.feature_size
_snake_case = self.feature_extractor.num_mel_bins
_snake_case = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_ )
_snake_case = feature_size_hack
_snake_case = targets['''input_values''']
else:
_snake_case = None
if inputs is None:
return targets
if targets is not None:
_snake_case = labels
_snake_case = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_snake_case = decoder_attention_mask
return inputs
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
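# Usage sketch (added, not part of the original module): driving the
# processor above through the public API; the checkpoint name is the stock
# SpeechT5 TTS model on the Hub.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello world", return_tensors="pt")
print(inputs["input_ids"].shape)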
| 358
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
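# Illustration (added, not part of the original test): a minimal demo of the
# undirected adjacency-list construction used above — every weighted edge is
# recorded in both endpoint lists.
from collections import defaultdict

adj_example = defaultdict(list)
for u, v, w in [(0, 1, 4), (0, 7, 8)]:
    adj_example[u].append([v, w])
    adj_example[v].append([u, w])
print(dict(adj_example))  # {0: [[1, 4], [7, 8]], 1: [[0, 4]], 7: [[0, 8]]}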
| 295
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCamelCase__ ( *UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict = None , UpperCamelCase__ : str=True , UpperCamelCase__ : int=2 ) -> List[str]:
'''simple docstring'''
from .. import __version__
_snake_case = take_from
_snake_case = ()
if not isinstance(args[0] , _lowercase ):
_snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowercase ).base_version ) >= version.parse(_lowercase ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
_snake_case = None
if isinstance(_lowercase , _lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowercase ),)
_snake_case = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowercase , _lowercase ):
values += (getattr(_lowercase , _lowercase ),)
_snake_case = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
_snake_case = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
_snake_case = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , _lowercase , stacklevel=_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) > 0:
_snake_case = inspect.getouterframes(inspect.currentframe() )[1]
_snake_case = call_frame.filename
_snake_case = call_frame.lineno
_snake_case = call_frame.function
_snake_case , _snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowercase ) == 0:
return
elif len(_lowercase ) == 1:
return values[0]
return values
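# Usage sketch (added, not part of the original module): how diffusers' real
# `deprecate` helper (which the function above mirrors) is typically called —
# pop a deprecated kwarg, emit a FutureWarning, and return its value. The
# wrapper function below is illustrative.
from diffusers.utils import deprecate

def resize_sketch(image, **kwargs):
    # Warns and pops `size` from kwargs if the caller passed it; returns None otherwise.
    size = deprecate("size", "2.0.0", "`size` is deprecated, use `shape` instead.", take_from=kwargs)
    return size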
| 359
|
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 0
|
from __future__ import annotations
UpperCAmelCase_ = list[tuple[int, int]]
UpperCAmelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> int:
_snake_case = pos_x
_snake_case = pos_y
_snake_case = (pos_y, pos_x)
_snake_case = goal_x
_snake_case = goal_y
_snake_case = g_cost
_snake_case = parent
_snake_case = self.calculate_heuristic()
def lowerCAmelCase ( self ) -> float:
_snake_case = abs(self.pos_x - self.goal_x )
_snake_case = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , lowerCAmelCase_ ) -> bool:
return self.f_cost < other.f_cost
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
_snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _SCREAMING_SNAKE_CASE )
_snake_case = [self.start]
_snake_case = []
_snake_case = False
def lowerCAmelCase ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_snake_case = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_snake_case = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
_snake_case = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
_snake_case = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> list[Node]:
_snake_case = []
for action in delta:
_snake_case = parent.pos_x + action[1]
_snake_case = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Path:
_snake_case = node
_snake_case = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_snake_case = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
UpperCAmelCase_ = GreedyBestFirst(init, goal)
UpperCAmelCase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCAmelCase_ = 2
for elem in grid:
print(elem)
| 360
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
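# Usage sketch (added, not part of the original module): the class above
# backs the high-level `pipeline("text-generation")` factory; gpt2 is the
# stock example checkpoint.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])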
| 295
| 0
|
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ = """examples/"""
UpperCAmelCase_ = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ = """README.md"""
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Tuple:
'''simple docstring'''
with open(_a , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.read()
_snake_case , _snake_case = REPLACE_PATTERNS[pattern]
_snake_case = replace.replace('VERSION' , _a )
_snake_case = re_pattern.sub(_a , _a )
with open(_a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_a )
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
for folder, directories, fnames in os.walk(_a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_a , _a ) , _a , pattern='examples' )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=False ) -> Optional[int]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_a , _a , _a )
if not patch:
update_version_in_examples(_a )
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
_snake_case = '🤗 Transformers currently provides the following architectures'
_snake_case = '1. Want to contribute a new model?'
with open(_a , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
# Find the start of the list.
_snake_case = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_snake_case = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_snake_case = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(_a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_a )
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
_snake_case = f.read()
_snake_case = REPLACE_PATTERNS['init'][0].search(_a ).groups()[0]
return packaging.version.parse(_a )
def pre_release_work(patch: bool = False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCamelCase_ ( PretrainedConfig ):
    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=5_0277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
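# Quick sanity sketch (values are the defaults above, not a claim about any
# released checkpoint):
#   config = UpperCamelCase_()
#   assert config.attention_hidden_size == config.hidden_size   # falls back when unset
#   assert config.intermediate_size == 4 * config.hidden_size   # ditto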
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
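# For example, the space byte (0x20) is not in the printable set, so it is remapped
# past 0xFF: bytes_to_unicode()[ord(' ')] == '\u0120' ('Ġ'), the word-boundary
# marker seen in GPT-2/BART merge files.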
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
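# e.g. get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}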
class UpperCamelCase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
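    # Resulting layouts (BART convention, shown for clarity):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>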
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding['pixel_values'] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCamelCase_ ( nn.Module ):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*'))
        if not len(paths):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)')
        if len(paths) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)')
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png'):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(F'''gif saved to {output_path}''')
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError('Input either path or tensor')
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z
    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors='pt', padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, img, neg_prompts):
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'], img, weights=(1 / pos_prompts['weights']))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'], img, weights=neg_prompts['weights'])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, processed_img, neg_prompts)
            print('CLIP loss', clip_loss)
            if self.log:
                wandb.log({'CLIP Loss': clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project='face-editor')
        wandb.config.update({'Positive Prompts': positive_prompts})
        wandb.config.update({'Negative Prompts': negative_prompts})
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log('Original Image', wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split('|')]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(':')
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts']))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print('Original Image')
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}.png'''))
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}_final.png'''))
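# Minimal usage sketch (file names and the prompt are illustrative; a local VQGAN
# config/checkpoint are assumed to exist for load_vqgan to find):
#   editor = UpperCamelCase_(vqgan_config='vqgan.yaml', vqgan_checkpoint='vqgan.ckpt', iterations=20)
#   editor.generate('a smiling face', image_path='face.png', show_intermediate=False)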
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)

    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'''{attribute} is initialized.''')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'''{attribute} is initialized''')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''')
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(F'''{key} was not correctly initialized!''')

    print(F'''Saving model to {pytorch_dump_folder_path}''')
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
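# Worked example of the recurrence: C(3) = C0*C2 + C1*C1 + C2*C0 = 2 + 1 + 2 = 5,
# so catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].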
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
UpperCAmelCase_ = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
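# e.g. complete_graph(3) -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}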
if __name__ == "__main__":
import doctest
doctest.testmod()
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("""transformers-cli/serving""")
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on', )
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)', )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(F'''Serving model over {host}:{port}''')
            self._app = FastAPI(
                routes=[
                    APIRoute('/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                    APIRoute('/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that the coefficient part of the augmented table is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
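    # Worked example (an added sketch, not part of the original module): a strictly
    # diagonally dominant 3x3 system, so the Jacobi sweeps are guaranteed to converge.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0, 0, 0], iterations=10))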
| 366
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('allenai/led-base-16384')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt'
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt')
            targets = tokenizer(text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
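# A minimal usage sketch (illustration only, mirroring the tests above; it assumes the
# `allenai/led-base-16384` checkpoint is reachable): LED takes a global attention mask
# alongside the usual inputs, with 1 marking globally attended positions.
if __name__ == "__main__":
    tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384')
    enc = tokenizer(['A long paragraph for summarization.'], return_tensors='pt')
    # give the first token (<s>) global attention, as is conventional for LED
    enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids'].tolist()]
    print(enc['input_ids'].shape, enc['global_attention_mask'])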
| 295
| 0
|
def check_bipartite_dfs(graph):
    """Check whether a graph (given as an adjacency list) is bipartite, using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
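# A quick negative example (added for illustration): a triangle contains an odd cycle,
# so no 2-coloring exists and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False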
| 367
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]']
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ['a', "'", 'll', '!', '!', 'to', '?', "'", 'd', 'of', ',', 'can', "'", 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
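# A minimal usage sketch (illustration only; it assumes the `bert-base-uncased`
# checkpoint is reachable): the pretrained tokenizer lower-cases and strips accents by
# default, matching the BasicTokenizer behaviour exercised above.
if __name__ == "__main__":
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    print(tokenizer.tokenize('UNwant\u00E9d,running'))  # expected to lower-case and strip the accent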
| 295
| 0
|
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base85 (Ascii85) bytes."""
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Base85 (Ascii85) bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
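    # Round-trip sketch (added for illustration): decoding an encoding recovers the input.
    encoded = base85_encode('Hello World!')
    print(encoded)
    print(base85_decode(encoded))  # Hello World!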
| 368
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')
    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test__add__matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n',
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 369
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name='20220301.frr',
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train']))
| 295
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
_lowercase = abs(int(input("""Enter bound : """).strip()))
print("""Here\'s the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 370
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Project Euler 31: count the ways to make n pence from standard UK coins."""
    return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
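    # Worked example (added for illustration): for a 10p total only the 1p, 2p, 5p and
    # 10p coins can contribute, giving 11 combinations.
    print(solution(10))  # 11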
| 295
| 0
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCAmelCase_ = """\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"""
UpperCAmelCase_ = """\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"""
UpperCAmelCase_ = """\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        'accuracy': acc,
        'f1': f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '
'\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '
'\"wiki-ner\"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '
'\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '
'\"wiki-ner\"]' )
| 371
|
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, keeping only a single row of the triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
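# Cross-check (added for illustration): the row-update result matches the closed form
# C(10, 5) = 10! / (5! * 5!) = 252, as computed by the standard library.
import math
print(math.comb(10, 5))  # 252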
| 295
| 0
|