| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer right triangles per perimeter.

    Returns a Counter mapping each perimeter <= max_perimeter to the number of
    right triangles with integer sides (base <= perpendicular) having that
    perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        # start at `base` so each {base, perpendicular} pair is counted once
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):  # integer hypotenuse -> Pythagorean triple
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1_000) -> int:
    """Project Euler 39: return the perimeter <= max_perimeter that admits the
    largest number of integer right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"""Perimeter {solution()} has maximum solutions""")
| 223 |
'''simple docstring'''
from __future__ import annotations
_snake_case = 8.988e9 # units = N * m^s * C^-2
def _A ( snake_case , snake_case , snake_case , snake_case ) -> dict[str, float]:
_lowercase : List[Any] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if distance < 0:
raise ValueError("Distance cannot be negative" )
if force == 0:
_lowercase : Tuple = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_lowercase : List[Any] = abs(snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_lowercase : Any = abs(snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_lowercase : Union[str, Any] = (COULOMBS_CONSTANT * charge_product / abs(snake_case )) ** 0.5
return {"distance": distance}
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old key prefix, new key prefix) pairs applied in order to every checkpoint
# key; name matches the `rename_keys_prefix` default parameter used below.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Original checkpoint filenames the converter supports (referenced as
# ACCEPTABLE_CHECKPOINTS by the conversion entry point).
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint from disk onto CPU and return the raw state dict."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=None):
    """Rename original VisualBERT checkpoint keys to the transformers scheme.

    Detector weights are dropped; every other key has each (old, new) prefix
    pair from `rename_keys_prefix` applied in order. A position_ids buffer is
    added, and the tied decoder bias is duplicated for old BERT checkpoints.
    """
    if rename_keys_prefix is None:
        # Default to the module-level mapping (kept lazy so the function can be
        # called with an explicit mapping in isolation).
        rename_keys_prefix = globals()["rename_keys_prefix"]
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak an original VisualBERT checkpoint into a transformers
    model and save it to `pytorch_dump_folder_path`.

    Raises:
        AssertionError: if the checkpoint filename is not a known checkpoint.
        NotImplementedError: for unrecognized pre-training checkpoints.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)

    # Save Checkpoints (exist_ok=True: the original passed the path here by mistake)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 252 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Names match the class attributes referenced by the ESM tokenizer below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    """Read a vocab file and return its lines, stripped of surrounding whitespace."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class a_(PreTrainedTokenizer):
    """ESM protein-sequence tokenizer: whitespace tokenization over a fixed vocab."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file: str,
        unk_token: str = "<unk>",
        cls_token: str = "<cls>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        eos_token: str = "<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # Every vocab token is atomic: never split any of them during tokenization.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM sequences are tokenized by simple whitespace splitting.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False):
        # NOTE(review): forwarding with special_tokens=True matches the upstream
        # ESM tokenizer; the scrambled original passed an undefined name here.
        return super()._add_tokens(new_tokens, special_tokens=True)
| 252 | 1 |
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase__(inductance: float, capacitance: float) -> tuple:
    """Return ("Resonant frequency", f) for an ideal LC circuit,
    with f = 1 / (2*pi*sqrt(L*C)).

    Raises:
        ValueError: if inductance or capacitance is zero or negative.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (2^i * 3^j * 5^k), ascending.

    Raises:
        ValueError: if n_element < 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k point at the smallest element whose 2x / 3x / 5x multiple
    # has not yet been emitted.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("""-----------------------------------------------------""")
| 292 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds a small random BertConfig plus matching inputs for the Flax BERT tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random input_ids/masks and a config sized by this tester's attributes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): value reconstructed — the scrambled source passed an undefined name
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class _A(FlaxModelTesterMixin, unittest.TestCase):
    """Model tests for the Flax BERT family (driven by FlaxModelTesterMixin)."""

    # NOTE(review): flag name reconstructed from the upstream test layout — confirm.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,  # NOTE(review): duplicated in the original — possibly intended to be another class
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 77 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ProphetNetTokenizer (BERT-style WordPiece vocab)."""

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False  # ProphetNet has no fast tokenizer

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # ProphetNet appends [SEP] (id 102) after each sequence.
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 77 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for the given checkpoint name (sizes + labels)."""
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # NOTE(review): attribute name reconstructed — the scrambled source only
        # shows `= False` here; confirm against the upstream conversion script.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # the original mapped every key through int(<wrong name>); keys are string ids
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on `config` based on the model-size tag in `model_name`.

    "base" is the VideoMAEConfig default, so it sets nothing here.

    Raises:
        ValueError: if the name contains none of small/base/large/huge.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"")
def rename_key(name):
    """Map one original VideoMAE checkpoint key to the transformers naming scheme.

    Replacements are order-dependent (e.g. "attn.proj" must be rewritten before
    the bare "attn" rules fire).
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original VideoMAE state dict in place to transformers keys.

    Fused qkv projection weights are split into separate query/key/value
    tensors; all other keys go through `rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                # NOTE(review): target key names reconstructed from the upstream
                # conversion script — the scrambled source only shows the slices.
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download the eating-spaghetti sample video and return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint (Google Drive) to transformers
    format, verify its logits against hard-coded expectations, and save it.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    # NOTE(review): quiet flag value reconstructed — the scrambled source passed an undefined name.
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 479 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase_(DeiTImageProcessor):
    """Deprecated alias of `DeiTImageProcessor`.

    Kept only for backward compatibility: constructing an instance emits a
    `FutureWarning` and forwards all arguments unchanged to
    `DeiTImageProcessor`.
    """

    def __init__(self, *args, **kwargs):
        # Bug fixes vs. the previous revision:
        #  * `*UpperCAmelCase, **UpperCAmelCase` duplicated a parameter name,
        #    which is a SyntaxError in Python;
        #  * the positional-args tuple was passed as `warnings.warn`'s second
        #    argument (the warning *category*) — `FutureWarning` is intended;
        #  * the base class `__UpperCamelCase` was never defined; the import
        #    at the top of this file provides `DeiTImageProcessor`.
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 479 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger.
lowerCAmelCase_ = logging.get_logger(__name__)

# NOTE(review): this assignment rebinds the same name as the logger above,
# discarding it — upstream these are two distinct module-level names (the
# logger and the pretrained-config archive map). Confirm before relying on
# either binding.
lowerCAmelCase_ = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a NAT (Neighborhood Attention Transformer) model.

    Holds the architecture hyper-parameters (patch size, embedding dimension,
    per-stage depths and head counts, dropout rates, ...) plus the backbone
    stage-name / out-feature bookkeeping supplied by ``BackboneConfigMixin``.

    Bug fixes vs. the previous revision: every ``__init__`` parameter shared
    the name ``lowerCamelCase`` (a SyntaxError), no value was ever stored on
    ``self``, the two class attributes shared one name, and the base-class
    list repeated a single undefined name instead of naming the two mixins
    imported at the top of this file.
    """

    # Required by the `PretrainedConfig` machinery to identify the model family.
    model_type = "nat"

    # Map generic config attribute names onto the NAT-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],        # list defaults mirror the original signature; never mutated here
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 435 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 435 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Unit tests for ``RealmRetriever`` (block-record retrieval for REALM).

    Builds a throwaway tokenizer vocab and a small array of block records in a
    temp directory, then checks retrieval, answer-span lookup and save/load.

    NOTE(review): automated renaming has damaged this class — every method is
    named ``a_`` (later defs shadow earlier ones), results are bound to the
    literal name ``UpperCAmelCase`` while later lines read the intended names
    (``vocab_tokens``, ``config``, ``retriever``, ...), and ``lowercase_`` /
    ``SCREAMING_SNAKE_CASE_`` (presumably ``unittest.TestCase``) are never
    defined here. Restore from the upstream test module before running.
    """

    def a_ ( self ) -> Tuple:
        # setUp: temp dir, tokenizer vocab file, and a block-records directory.
        UpperCAmelCase = tempfile.mkdtemp()
        UpperCAmelCase = 5

        # Realm tok
        UpperCAmelCase = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname , 'realm_tokenizer' )
        os.makedirs(lowercase_ , exist_ok=lowercase_ )
        UpperCAmelCase = os.path.join(lowercase_ , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        UpperCAmelCase = os.path.join(self.tmpdirname , 'realm_block_records' )
        os.makedirs(lowercase_ , exist_ok=lowercase_ )

    def a_ ( self ) -> RealmTokenizer:
        # Load the tokenizer written by setUp.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )

    def a_ ( self ) -> Optional[int]:
        # tearDown: remove the temp directory.
        shutil.rmtree(self.tmpdirname )

    def a_ ( self ) -> Optional[int]:
        # Minimal config with the number of dummy block records.
        UpperCAmelCase = RealmConfig(num_block_records=self.num_block_records )
        return config

    def a_ ( self ) -> Optional[Any]:
        # Tiny QA dataset with two questions and their answers.
        UpperCAmelCase = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            } )
        return dataset

    def a_ ( self ) -> Tuple:
        # Six byte-string block records; the last one is deliberately longer.
        UpperCAmelCase = np.array(
            [
                B'This is the first record',
                B'This is the second record',
                B'This is the third record',
                B'This is the fourth record',
                B'This is the fifth record',
                B'This is a longer longer longer record',
            ] , dtype=lowercase_ , )
        return block_records

    def a_ ( self ) -> str:
        # Assemble a retriever from the dummy block records and tokenizer.
        UpperCAmelCase = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def a_ ( self ) -> Optional[int]:
        # Retrieve for one question over blocks 0 and 3 and check the
        # concatenated (question + block) encodings and decoded tokens.
        UpperCAmelCase = self.get_config()
        UpperCAmelCase = self.get_dummy_retriever()
        UpperCAmelCase = retriever.tokenizer

        UpperCAmelCase = np.array([0, 3] , dtype='long' )
        UpperCAmelCase = tokenizer(['Test question'] ).input_ids
        UpperCAmelCase = tokenizer(
            ['the fourth'] , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , ).input_ids
        UpperCAmelCase = config.reader_seq_len

        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever(
            lowercase_ , lowercase_ , answer_ids=lowercase_ , max_length=lowercase_ , return_tensors='np' )

        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )

    def a_ ( self ) -> str:
        # Check answer-span extraction: has-answer flags plus start/end positions.
        UpperCAmelCase = self.get_config()
        UpperCAmelCase = self.get_dummy_retriever()
        UpperCAmelCase = retriever.tokenizer

        UpperCAmelCase = np.array([0, 3, 5] , dtype='long' )
        UpperCAmelCase = tokenizer(['Test question'] ).input_ids
        UpperCAmelCase = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , ).input_ids
        UpperCAmelCase = config.reader_seq_len

        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever(
            lowercase_ , lowercase_ , answer_ids=lowercase_ , max_length=lowercase_ , return_tensors='np' )

        self.assertEqual([False, True, True] , lowercase_ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowercase_ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowercase_ )

    def a_ ( self ) -> Dict:
        # Round-trip save_pretrained/from_pretrained, locally and via a
        # patched hub download.
        UpperCAmelCase = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )

        # Test local path
        UpperCAmelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0] , B'This is the first record' )

        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            UpperCAmelCase = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
            UpperCAmelCase = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )

            self.assertEqual(retriever.block_records[0] , B'This is the first record' )
| 373 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)

# NOTE(review): this assignment rebinds the same name as the logger above,
# discarding it — upstream these are two distinct module-level names (the
# logger and the pretrained-config archive map). Confirm before relying on
# either binding.
SCREAMING_SNAKE_CASE_ = {
    '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for a SEW-D speech model.

    Stores the feature-extractor (conv stack), transformer-encoder,
    SpecAugment, CTC and sequence-classification hyper-parameters.

    Bug fixes vs. the previous revision: every ``__init__`` parameter shared
    the name ``lowercase_`` (a SyntaxError), no value was ever stored on
    ``self``, and the base class referenced a module-level dict instead of
    the ``PretrainedConfig`` imported at the top of this file.
    """

    # Required by the `PretrainedConfig` machinery to identify the model family.
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs so the stored lists are independent of the
        # (shared, immutable) tuple defaults.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides, i.e. the total input downsampling factor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)

    # Backward-compatible alias for the obfuscated property name.
    a_ = inputs_to_logits_ratio
| 373 | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __snake_case:
    """Builds MegatronBert configs plus dummy inputs and checks each head model's output shapes.

    NOTE(review): automated renaming has damaged this helper — ``__init__``
    repeats the parameter name ``__lowerCamelCase`` (a SyntaxError), every
    result is bound to the literal name ``__A`` while the assertions read the
    intended names (``result``, ``config``, ...), and ``lowerCAmelCase_`` is
    never defined. Restore from the upstream test module before running.
    """

    def __init__( self , __lowerCamelCase , __lowerCamelCase=13 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=99 , __lowerCamelCase=64 , __lowerCamelCase=32 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=37 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=16 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ):
        # Store the test hyper-parameters (batch/sequence sizes, model dims, ...).
        __A : Dict = parent
        __A : List[Any] = batch_size
        __A : List[str] = seq_length
        __A : List[str] = is_training
        __A : int = use_input_mask
        __A : str = use_token_type_ids
        __A : Union[str, Any] = use_labels
        __A : Optional[int] = vocab_size
        __A : str = hidden_size
        __A : Optional[int] = embedding_size
        __A : Any = num_hidden_layers
        __A : int = num_attention_heads
        __A : Any = intermediate_size
        __A : List[Any] = hidden_act
        __A : Dict = hidden_dropout_prob
        __A : Optional[int] = attention_probs_dropout_prob
        __A : List[Any] = max_position_embeddings
        __A : Optional[Any] = type_vocab_size
        __A : Dict = type_sequence_label_size
        __A : Optional[Any] = initializer_range
        __A : int = num_labels
        __A : Any = num_choices
        __A : List[str] = scope

    def _a ( self ):
        """Create random ids/masks/labels plus a config for one forward pass."""
        __A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __A : List[str] = None
        if self.use_input_mask:
            __A : Any = random_attention_mask([self.batch_size, self.seq_length] )

        __A : Any = None
        if self.use_token_type_ids:
            __A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __A : int = None
        __A : Any = None
        __A : Union[str, Any] = None
        if self.use_labels:
            __A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __A : List[str] = ids_tensor([self.batch_size] , self.num_choices )

        __A : Tuple = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _a ( self ):
        """Build a MegatronBertConfig from the stored hyper-parameters."""
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Forward the bare MegatronBertModel and check hidden-state/pooler shapes."""
        __A : int = MegatronBertModel(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
        __A : List[str] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
        __A : Union[str, Any] = model(lowerCAmelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the masked-LM head's logits shape."""
        __A : Tuple = MegatronBertForMaskedLM(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the causal-LM head's logits shape."""
        __A : Any = MegatronBertForCausalLM(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the next-sentence-prediction head (2-way logits)."""
        __A : str = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Optional[int] = model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the pretraining head (MLM logits + seq-relationship logits)."""
        __A : Any = MegatronBertForPreTraining(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Optional[int] = model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the question-answering head's start/end logits shapes."""
        __A : List[str] = MegatronBertForQuestionAnswering(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Optional[int] = model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the sequence-classification head's logits shape."""
        __A : Optional[int] = self.num_labels
        __A : Dict = MegatronBertForSequenceClassification(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the token-classification head's logits shape."""
        __A : str = self.num_labels
        __A : int = MegatronBertForTokenClassification(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
        """Check the multiple-choice head; inputs are tiled across the choice dim."""
        __A : Optional[Any] = self.num_choices
        __A : Tuple = MegatronBertForMultipleChoice(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        __A : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A : str = model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _a ( self ):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for the common tests."""
        __A : Optional[int] = self.prepare_config_and_inputs()

        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) : List[str] = config_and_inputs
        __A : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}

        return config, inputs_dict
@require_torch
class __snake_case( __a , __a , unittest.TestCase ):
    """Common + pipeline test-suite entry point for the MegatronBert head models.

    NOTE(review): automated renaming has damaged this class — the base list
    repeats the undefined name ``__a`` (upstream: the common model/pipeline
    tester mixins), the class attributes all share the name ``_UpperCAmelCase``
    (later bindings shadow earlier ones), ``MegatronBertModelTester`` is not
    defined in this file, and ``lowerCAmelCase_`` is never defined. Restore
    from the upstream test module before running.
    """

    # All model classes exercised by the common tests (torch-only).
    _UpperCAmelCase = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    _UpperCAmelCase = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = True
    # test_resize_embeddings = False
    _UpperCAmelCase = False

    def _a ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ):
        """Augment the common inputs with zero label tensors when return_labels is set."""
        __A : str = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )

        if return_labels:
            if model_class in get_values(lowerCAmelCase_ ):
                __A : Any = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ )
                __A : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )

        return inputs_dict

    def _a ( self ):
        """setUp: create the model tester and a ConfigTester."""
        __A : str = MegatronBertModelTester(self )
        __A : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )

    def _a ( self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def _a ( self ):
        """Test the bare model."""
        __A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the masked-LM head."""
        __A : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the multiple-choice head."""
        __A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the next-sentence-prediction head."""
        __A : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the pretraining head."""
        __A : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the question-answering head."""
        __A : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the sequence-classification head."""
        __A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_ )

    def _a ( self ):
        """Test the token-classification head."""
        __A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_ )
def _lowercase(_SCREAMING_SNAKE_CASE: list) -> "torch.Tensor":
    """Build a long (int64) tensor from a (nested) list of token ids.

    The tensor is placed on ``torch_device``, the test device imported from
    ``transformers.testing_utils`` at the top of this file.

    Bug fixes vs. the previous revision: the body referenced the undefined
    name ``lowercase__`` both for the data and the device (a NameError), and
    the return annotation claimed ``Dict``.
    """
    return torch.tensor(_SCREAMING_SNAKE_CASE, dtype=torch.long, device=torch_device)


# Descriptive alias matching the name used by the integration test below.
_long_tensor = _lowercase
lowerCamelCase : Optional[Any] =1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case( unittest.TestCase ):
    """Slow integration test comparing a MegatronBert output slice against reference values.

    NOTE(review): automated renaming has damaged this test — results are bound
    to the literal name ``__A`` while later lines read ``output``/``expected``,
    ``lowerCAmelCase_`` is never defined, and the tensor helper above no
    longer carries the ``_long_tensor`` name this test calls. The test is also
    explicitly skipped ('Model is not available.').
    """

    @slow
    @unittest.skip('Model is not available.' )
    def _a ( self ):
        """Run nvidia/megatron-bert-uncased-345m in fp16 on a fixed input and compare a 3x3 output slice."""
        __A : Optional[Any] = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            # Allow resolving the checkpoint relative to a local directory.
            __A : List[Any] = os.path.join(os.environ['MYDIR'] , lowerCAmelCase_ )
        __A : Optional[Any] = MegatronBertModel.from_pretrained(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.half()
        __A : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            __A : Optional[Any] = model(lowerCAmelCase_ )[0]
        __A : List[Any] = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , lowerCAmelCase_ )
        __A : Optional[Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                __A : Union[str, Any] = output[0, ii, jj]
                __A : Tuple = expected[3 * ii + jj]
                __A : Tuple = 'ii={} jj={} a={} b={}'.format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_ ) , msg=lowerCAmelCase_ )
| 719 | """simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
lowerCamelCase : Tuple =logging.get_logger(__name__)

# NOTE(review): this assignment rebinds the same name as the logger above,
# discarding it — upstream these are two distinct module-level names (the
# logger and the pretrained-config archive map). Confirm before relying on
# either binding.
lowerCamelCase : List[Any] ={
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}
}
class __snake_case(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model.

    Holds the architecture hyper-parameters: input resolution, per-stage patch
    sizes / strides / hidden sizes / depths / MLP ratios, plus the usual
    initializer, norm and dropout settings.

    Bug fixes vs. the previous revision: every ``__init__`` parameter shared
    the name ``__lowerCamelCase`` (a SyntaxError), no value was ever stored on
    ``self``, and the base class ``A_`` was undefined — ``PretrainedConfig``
    is imported at the top of this file.
    """

    # Required by the `PretrainedConfig` machinery to identify the model family.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],     # list defaults mirror the original signature; never mutated here
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 237 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Cut-off frequency in Hz.
        samplerate: Sampling rate in Hz.
        q_factor: Quality factor; 1/sqrt(2) gives a Butterworth response.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: the def repeated the parameter name
    ``UpperCamelCase_`` (a SyntaxError), every intermediate was assigned to
    the literal name ``snake_case`` while later lines read ``_sin``/``aa``/
    ``ba`` (a NameError), and all seven constructors in this module shared one
    function name; the upstream names are restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For the low-pass prototype b2 == b0, hence [b0, b1, b0].
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Cut-off frequency in Hz.
        samplerate: Sampling rate in Hz.
        q_factor: Quality factor; 1/sqrt(2) gives a Butterworth response.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # For the high-pass prototype b2 == b0, hence [b0, b1, b0].
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order band-pass biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Center frequency in Hz.
        samplerate: Sampling rate in Hz.
        q_factor: Quality factor controlling the pass-band width.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Corner frequency in Hz.
        samplerate: Sampling rate in Hz.
        q_factor: Quality factor.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: feed-forward coefficients are the feedback ones reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Center frequency in Hz.
        samplerate: Sampling rate in Hz.
        gain_db: Peak gain in decibels (negative for a notch-like cut).
        q_factor: Quality factor controlling the bandwidth.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Shelf transition frequency in Hz.
        samplerate: Sampling rate in Hz.
        gain_db: Shelf gain in decibels.
        q_factor: Quality factor controlling the shelf slope.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shared sub-expressions of the shelf formulas:
    #   pmc  = (A+1) - (A-1)cos,  ppmc = (A+1) + (A-1)cos
    #   mpc  = (A-1) - (A+1)cos,  pmpc = (A-1) + (A+1)cos
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> "IIRFilter":
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook).

    Args:
        frequency: Shelf transition frequency in Hz.
        samplerate: Sampling rate in Hz.
        gain_db: Shelf gain in decibels.
        q_factor: Quality factor controlling the shelf slope.

    Returns:
        A configured 2nd-order ``IIRFilter``.

    Bug fixes vs. the previous revision: duplicate parameter names
    (SyntaxError) and intermediates assigned to ``snake_case`` while read
    under their intended names (NameError); upstream naming restored.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Shared sub-expressions of the shelf formulas (see make_lowshelf).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 550 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__(unittest.TestCase):
    """
    Tests for ``ChineseCLIPProcessor`` (BERT tokenizer + ChineseCLIP image processor).

    NOTE(review): every assignment target and method name in this class had been
    mangled (all methods were ``a_``, shadowing each other and breaking both
    ``self.get_tokenizer()``-style references and unittest discovery). Names were
    reconstructed from the surviving references — confirm against the upstream
    transformers test file.
    """

    def setUp(self):
        # Write a tiny vocab and an image-processor config into a temp dir so
        # both components can be loaded via `from_pretrained`.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 550 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """
    Tokenizer tests for MGP-STR.

    NOTE(review): assignment targets, the mixin base, and the test-method names
    were mangled in the original (all methods were ``_lowercase``); they were
    reconstructed from the surviving references and the skip messages — confirm
    against the upstream transformers test file.
    """

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                # MGP-STR decodes without spaces between characters.
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 162 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names restored: the original rebound a single placeholder four times, leaving
# VOCAB_FILES_NAMES / PRETRAINED_* (referenced by the tokenizer class below)
# and `logger` undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """
    Load a newline-delimited vocabulary file into an ordered {token: index} map.

    Renamed from the mangled ``A``: the tokenizer below calls ``load_vocab``.
    Also fixes the mangled loop that enumerated the *path string* instead of
    the lines read from the file.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        vocab[token.rstrip("\n")] = index
    return vocab
class WordpieceTokenizer(object):
    """
    Greedy longest-match-first wordpiece tokenizer used by the CPM-Ant tokenizer.

    Renamed from the mangled ``UpperCAmelCase_``: the tokenizer class below
    instantiates ``WordpieceTokenizer(...)``. Duplicate ``__init__`` parameter
    names (a SyntaxError) and the mangled locals were restored from the
    surviving references.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into in-vocab sub-tokens, emitting `unk_token` per unmatched char."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            # Find the longest substring starting at `start` that is in the vocab.
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No match at all: emit the unknown token and advance one char.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class UpperCAmelCase_(PreTrainedTokenizer):
    """
    CPM-Ant tokenizer: jieba pre-segmentation followed by greedy wordpiece.

    NOTE(review): the original class had every assignment target mangled and an
    undefined placeholder base class; attribute/method names were reconstructed
    from the surviving references (e.g. `self.encoder`, `self.wordpiece_tokenizer`,
    `logger`) — confirm against the upstream CPM-Ant tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Remap the explicit space/newline marker tokens to literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-tokenize each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        # Drop padding (negative ids) and the special bos/eos/pad tokens.
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the literal space/newline entries to their marker tokens on disk.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 162 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    # NOTE(review): the original assigned "0.12" to a bare mangled name,
    # losing the target; restored per the upstream test file, which caps the
    # per-process XLA GPU memory fraction — confirm.
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """
    Create a random int32 numpy array of `shape` with token ids in [0, vocab_size).

    Renamed from the mangled ``lowerCamelCase_``: the attention-mask helper
    below calls ``ids_tensor``. The original also had duplicate parameter
    names (a SyntaxError).
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    # int32 matches the original's jnp.int32 (same dtype).
    output = np.array(values, dtype=np.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """
    Create a random 0/1 attention mask of `shape`; the last column is forced to 1.

    Renamed from the mangled ``lowerCamelCase_`` (duplicate parameter names were
    a SyntaxError).
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    # NOTE(review): the mangled source lost the assignment target on this line;
    # restored as the upstream `attn_mask[:, -1] = 1` — confirm.
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A_:
    """
    Mixin of `generate()` smoke tests for Flax generative models.

    Concrete test classes must set `model_tester` (providing
    `prepare_config_and_inputs_for_common`) and `all_generative_model_classes`,
    and also inherit from `unittest.TestCase` (the `self.assert*` calls below
    come from there).

    NOTE(review): every assignment target in this class had been mangled to one
    placeholder; the config-flag names (`do_sample`, `num_beams`, `top_k`, ...)
    were reconstructed from the surviving values and assertions and from the
    upstream transformers test file — confirm against it.
    """

    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        """Build a small (config, input_ids, attention_mask, max_length) tuple."""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        """Greedy generation must match between the Flax model and its PyTorch twin."""
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            # jitted generation must agree with the eager path
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class A_(unittest.TestCase):
    """
    Integration checks for `generate()` argument validation.

    NOTE(review): the original test method was mangled to `_lowercase` (never
    discovered by unittest) and the expected exception class was lost; restored
    as `ValueError` per the upstream test — confirm. This class shadows the
    mixin of the same mangled name above; the collision predates this edit.
    """

    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        input_text = "Hello world"
        input_ids = tokenizer(input_text, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# (Names restored: the original rebound one placeholder six times, leaving
# PATH_TO_TRANSFORMERS, spec, transformers, CONFIG_MAPPING, _re_checkpoint and
# the ignore-set undefined where they are used below.)
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Configuration classes that are deliberately allowed to have no checkpoint
# mentioned in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """
    Verify every config class docstring links a valid checkpoint.

    Renamed from the mangled ``snake_case_``: the ``__main__`` guard below calls
    ``check_config_docstrings_have_checkpoints``. A checkpoint link is valid
    when the markdown link text matches the huggingface.co URL it points to.

    Raises:
        ValueError: listing all config classes with no valid checkpoint link
            (excluding the deliberate ignore set).
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
# Allow running this checker directly: `python utils/check_config_docstrings.py`.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 83 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# Names restored: the original rebound a single placeholder five times, leaving
# the VOCAB_FILES_NAMES / PRETRAINED_* constants referenced by the tokenizer
# class below undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """
    "Fast" HerBERT tokenizer backed by the HuggingFace *tokenizers* library.

    NOTE(review): the original had an undefined placeholder base class,
    duplicate ``__init__`` parameter names (a SyntaxError), and mangled locals;
    names were restored from the surviving references and the upstream
    ``HerbertTokenizerFast`` — confirm against it.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s> B </s>` for a pair."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 708 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers `LDMPipeline`.

    Args:
        checkpoint_path: path to the original torch checkpoint (a dict with a "model" key).
        config_path: path to the matching OmegaConf YAML config.
        output_path: directory where the converted pipeline is saved.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: all three paths are required; see convert_ldm_original's
    # docstring elsewhere in this file for their meaning.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 480 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """ConfigTester specialization for MobileViTV2 (the name `MobileViTVaConfigTester`
    is what `setUp` below instantiates)."""

    def create_and_test_config_common_properties(self):
        # `width_multiplier` is the MobileViTV2-specific knob; make sure the config exposes it.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs/inputs and runs shape checks for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Hidden size scales with the width multiplier and is rounded to a multiple of 8.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        # NOTE(review): get_config below reads `ffn_dropout_prob`/`attn_dropout_prob`
        # (those reads survived the mangling), so the attributes are named accordingly.
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) with small random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Feature map is downsampled by `output_stride` in both spatial dimensions.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Check logits both without and with labels.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common + pipeline tests for MobileViTV2.

    Attention/head-masking/embedding-resizing suites are disabled: MobileViTV2
    exposes no attention outputs and has no token embeddings to resize or prune.
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against published MobileViTV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        # Post-processing with an explicit target size, then with the model's native size.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 89 |
"""simple docstring"""
import argparse
from collections import defaultdict
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : Dict = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(snake_case__ , """r""" ) as f:
_snake_case : Optional[Any] = f.readlines()
_snake_case : Tuple = F"class {class_name}("
_snake_case : Tuple = F"{4 * ' '}def {test_name}("
_snake_case : Optional[int] = F"{8 * ' '}{correct_line.split()[0]}"
_snake_case : Union[str, Any] = F"{16 * ' '}{correct_line.split()[0]}"
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : Optional[Any] = False
_snake_case : Optional[Any] = False
_snake_case : List[Any] = 0
_snake_case : List[Any] = 0
_snake_case : List[str] = []
for line in lines:
if line.startswith(snake_case__ ):
_snake_case : Union[str, Any] = True
elif in_class and line.startswith(snake_case__ ):
_snake_case : str = True
elif in_class and in_func and (line.startswith(snake_case__ ) or line.startswith(snake_case__ )):
_snake_case : Optional[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_snake_case : Optional[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_snake_case : str = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
_snake_case : int = False
else:
new_lines.append(snake_case__ )
with open(snake_case__ , """w""" ) as f:
for line in new_lines:
f.write(snake_case__ )
def main(correct_filename, fail=None):
    """Apply every correction listed in `correct_filename`.

    Each line has the form ``file;class;test;correct_line``. If `fail` is
    given, it lists failing tests as ``file::class::test`` and only those
    entries are rewritten; otherwise every entry is applied.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    # Shared counter so repeated file/class/test entries hit successive matches.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: --correct_filename lists corrections; --fail_filename
    # optionally restricts rewriting to tests that actually failed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 609 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase__ ( unittest.TestCase ):
    '''AutoTokenizer resolution tests (from_pretrained, tokenizer_type, registration).

    NOTE(review): identifiers in this class look machine-mangled — every method is
    named `UpperCAmelCase` (so later definitions shadow earlier ones), assignment
    targets are all `UpperCamelCase`, and bodies read names such as
    `lowerCamelCase__` / `tokenizer` that are never bound here. The original
    identifiers must be restored before these tests can execute.
    '''
    def UpperCAmelCase ( self ):
        '''Per-test setup; the target of this assignment was lost in the mangling.'''
        UpperCamelCase = 0
    @slow
    def UpperCAmelCase ( self ):
        '''Round-trips every known BERT / GPT-2 checkpoint name through AutoTokenizer.'''
        # Japanese BERT checkpoints are excluded (they need extra dependencies).
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
            self.assertIsInstance(lowerCamelCase__ , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(lowerCamelCase__ ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
            self.assertIsInstance(lowerCamelCase__ , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(lowerCamelCase__ ) , 0 )
    # NOTE(review): assignment targets/arguments below are mangled
    # (`UpperCamelCase` / `lowerCamelCase__`); restore originals before running.
    def UpperCAmelCase ( self ):
        '''Loads the dummy BERT identifier and checks the tiny vocab size.'''
        UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 1_2 )
    def UpperCAmelCase ( self ):
        '''Loads the dummy RoBERTa identifier and checks the tiny vocab size.'''
        UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 2_0 )
    def UpperCAmelCase ( self ):
        '''Loads a tokenizer from an explicit config object.'''
        UpperCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        # Check that tokenizer_type ≠ model_type
        UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 1_2 )
    def UpperCAmelCase ( self ):
        '''Loads slow tokenizers from local files via the tokenizer_type argument.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase__ , '''vocab.txt''' ) )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , tokenizer_type='''bert''' , use_fast=lowerCamelCase__ )
            self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase__ , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase__ , '''merges.txt''' ) )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , tokenizer_type='''gpt2''' , use_fast=lowerCamelCase__ )
            self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''Same as above, but with the default (fast) tokenizer classes.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase__ , '''vocab.txt''' ) )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , tokenizer_type='''bert''' )
            self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase__ , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase__ , '''merges.txt''' ) )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , tokenizer_type='''gpt2''' )
            self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''An unknown tokenizer_type must raise.'''
        with pytest.raises(lowerCamelCase__ ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
    # NOTE(review): mangled identifiers here (`UpperCamelCase` targets,
    # unbound `lowerCamelCase__` reads); restore originals before running.
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''Checks casing/model_max_length on a community BERT checkpoint.'''
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            UpperCamelCase = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(lowerCamelCase__ , (BertTokenizer, BertTokenizerFast) )
            if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCamelCase__ )
            else:
                self.assertEqual(tokenizer.do_lower_case , lowerCamelCase__ )
            self.assertEqual(tokenizer.model_max_length , 5_1_2 )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''A nonexistent model identifier must raise with a helpful message.'''
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                lowerCamelCase__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                UpperCamelCase = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
    def UpperCAmelCase ( self ):
        '''Every registered tokenizer class name must resolve via tokenizer_class_from_name.'''
        UpperCamelCase = TOKENIZER_MAPPING.values()
        UpperCamelCase = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(lowerCamelCase__ )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''use_fast toggles between slow and fast tokenizer instances.'''
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowerCamelCase__ )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''do_lower_case=False must keep cased text out-of-vocab for uncased models.'''
        UpperCamelCase = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowerCamelCase__ )
        UpperCamelCase = '''Hello, world. How are you?'''
        UpperCamelCase = tokenizer.tokenize(lowerCamelCase__ )
        self.assertEqual('''[UNK]''' , tokens[0] )
        UpperCamelCase = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowerCamelCase__ )
        UpperCamelCase = tokenizer.tokenize(lowerCamelCase__ )
        self.assertEqual('''[UNK]''' , tokens[0] )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''A tokenizer.json-only repo with a model config still loads as a fast tokenizer.'''
        UpperCamelCase = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(tokenizer.model_max_length , 5_1_2 )
        self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
        self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
        self.assertEqual(tokenizer.padding_side , '''right''' )
        self.assertEqual(tokenizer.truncation_side , '''right''' )
    # NOTE(review): mangled identifiers (`UpperCamelCase`, `lowerCamelCase__`,
    # `tokenizera`); restore originals before running.
    def UpperCAmelCase ( self ):
        '''save_pretrained + from_pretrained round-trip preserves class and vocab size.'''
        UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase__ )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        self.assertIsInstance(lowerCamelCase__ , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 1_2 )
    def UpperCAmelCase ( self ):
        '''CTRL has no fast tokenizer, so AutoTokenizer must fall back to the slow one.'''
        UpperCamelCase = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''get_tokenizer_config behavior: remote config, missing config, saved config.'''
        UpperCamelCase = get_tokenizer_config('''bert-base-cased''' )
        UpperCamelCase = config.pop('''_commit_hash''' , lowerCamelCase__ )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(lowerCamelCase__ , {'''do_lower_case''': False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        UpperCamelCase = get_tokenizer_config(lowerCamelCase__ )
        self.assertDictEqual(lowerCamelCase__ , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase__ )
            UpperCamelCase = get_tokenizer_config(lowerCamelCase__ )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
    # NOTE(review): mangled identifiers; the `finally` blocks below undo the
    # registry mutations so these tests do not leak into each other.
    def UpperCAmelCase ( self ):
        '''Registers a custom slow tokenizer and round-trips it through save/load.'''
        try:
            AutoConfig.register('''custom''' , lowerCamelCase__ )
            AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase__ ):
                AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
            UpperCamelCase = CustomTokenizer.from_pretrained(lowerCamelCase__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase__ )
                UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
                self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def UpperCAmelCase ( self ):
        '''Registers custom slow+fast tokenizers (two-step and one-step) and round-trips.'''
        try:
            AutoConfig.register('''custom''' , lowerCamelCase__ )
            # Can register in two steps
            AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(lowerCamelCase__ , fast_tokenizer_class=lowerCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ , fast_tokenizer_class=lowerCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCamelCase__ ):
                AutoTokenizer.register(lowerCamelCase__ , fast_tokenizer_class=lowerCamelCase__ )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                UpperCamelCase = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
                bert_tokenizer.save_pretrained(lowerCamelCase__ )
                UpperCamelCase = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase__ )
                UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
                self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
                UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , use_fast=lowerCamelCase__ )
                self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    # NOTE(review): mangled identifiers; the `trust_remote_code` arguments are all
    # collapsed to `lowerCamelCase__`, so the True/False intent must be restored.
    def UpperCAmelCase ( self ):
        '''Remote-code tokenizers require trust_remote_code and survive save/reload.'''
        with self.assertRaises(lowerCamelCase__ ):
            UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase__ ):
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ )
        UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase__ )
            UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            # Test we can also load the slow version
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowerCamelCase__ )
                UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
    @require_tokenizers
    def UpperCAmelCase ( self ):
        """Check that a locally registered custom tokenizer wins over (or loses to)
        Hub remote code depending on ``trust_remote_code``.

        NOTE(review): this file appears machine-mangled — every test method is named
        ``UpperCAmelCase`` (so earlier definitions are clobbered and unittest never
        collects any of them), both helper classes below share the name
        ``lowercase__``, and ``snake_case_`` / ``lowerCamelCase__`` are referenced
        but never defined in view. The comments describe apparent intent; confirm
        against the upstream AutoTokenizer test suite before relying on them.
        """

        # Stand-in slow tokenizer; the flag presumably marks the *local* variant.
        class lowercase__ ( snake_case_ ):
            """Placeholder slow tokenizer used for local registration."""

            _snake_case = False

        # Stand-in fast tokenizer; rebinding the same class name clobbers the one
        # above — these presumably had distinct names originally.
        class lowercase__ ( snake_case_ ):
            """Placeholder fast tokenizer used for local registration."""

            _snake_case = NewTokenizer
            _snake_case = False

        try:
            # Register the custom config plus slow/fast tokenizers locally.
            AutoConfig.register('''custom''' , lowerCamelCase__ )
            AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
            AutoTokenizer.register(lowerCamelCase__ , fast_tokenizer_class=lowerCamelCase__ )
            # If remote code is not set, the default is to use local
            UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowerCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            UpperCamelCase = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCamelCase = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , revision='''aaaaaa''' )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 350 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _UpperCAmelCase : Optional[int], _UpperCAmelCase : Tuple, _UpperCAmelCase : Any):
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_UpperCAmelCase, _UpperCAmelCase)
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
count += _in_place_quick_sort(_UpperCAmelCase, _UpperCAmelCase, p - 1)
count += _in_place_quick_sort(_UpperCAmelCase, p + 1, _UpperCAmelCase)
return count
def __snake_case ( _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any):
UpperCamelCase = 0
UpperCamelCase = randint(_UpperCAmelCase, _UpperCAmelCase)
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_UpperCAmelCase, _UpperCAmelCase):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
# Driver: sort 100 samples from a standard normal distribution and report the
# comparison count. Restored from mangled source, where every binding was named
# ``snake_case_`` while the statements below reference outfile/X/M/r/z.
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 350 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _a(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the ``tokenizers`` library.

    Restored from mangled source: every local was bound to ``_UpperCAmelCase`` so
    neither the special-token list nor the ``Tokenizer`` pipeline was ever
    assembled, and the base class was the undefined name ``lowerCAmelCase``
    (``BaseTokenizer`` is imported above).
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Dense id -> token list (index == special-token id), fed to the trainer.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append EOS to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an iterator of texts (or batches of texts)."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model's JSON so the UNK token id is registered."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 602 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a(PretrainedConfig):
    """Configuration for a ViViT model (image size, tubelet shape, transformer
    dimensions and regularization settings).

    Restored from mangled source: every parameter was bound to a throwaway local
    (``_UpperCAmelCase``) instead of being stored on ``self``, so instances had no
    attributes, and the base class was the undefined name ``lowerCAmelCase``.
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # mutable default kept for config-dict compatibility
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 602 | 1 |
"""simple docstring"""
from collections import namedtuple
__magic_name__ = namedtuple("from_to", "from_ to")
__magic_name__ = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ """, """.join(UpperCamelCase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ """, """.join(UpperCamelCase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 248 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
__magic_name__ = parser.parse_args()
__magic_name__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 248 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in `model`.

    Restored from mangled source: this was defined as ``UpperCamelCase`` while the
    callback below calls it as ``count_trainable_parameters``.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Module logger; restored to ``logger`` (it is referenced by that name in the
# callback below) and the spurious ``Tuple`` annotation dropped — ``Tuple`` was
# never imported here, so the annotated form raised NameError at import time.
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the top-3 checkpoints by ``val_{metric}``.

    Supported metrics: rouge2, bleu, em. Restored from mangled source (the def was
    named ``UpperCamelCase``, clobbering the parameter-count helper above).
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_{metric}``.

    Loss-like metrics are minimized, everything else maximized. Restored from
    mangled source (def was named ``UpperCamelCase``; ``verbose`` was bound to an
    undefined name — upstream it is True).
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class __lowercase(pl.Callback):
    """Lightning callback that logs learning rates, parameter counts and
    validation/test metrics for seq2seq fine-tuning.

    Restored from mangled source: every method was named ``_a``, so only the last
    one survived and none of the PyTorch Lightning hooks ever fired. Method names
    reconstructed from the hook semantics and the ``self._write_logs`` call —
    confirm against the upstream seq2seq example callbacks.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        """Dump callback metrics (and optionally generations) to text files."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        # Prefer the wrapped model's count (model.model) and fall back to the
        # module itself.
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 461 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs and dummy inputs for the TF model tests.

    Restored from mangled source: the class was named ``__lowercase`` although the
    test class below instantiates ``TFDPRModelTester``, and ``__init__`` bound
    every argument to a throwaway local instead of ``self``, so the tester had no
    attributes at all.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        # DPR wraps a BERT backbone; build the BertConfig first, then lift it.
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the three TF DPR heads.

    Restored from mangled source: the class shared the name ``__lowercase`` with
    the tester and the integration test (so they clobbered each other), the mixin
    bases were the undefined name ``__snake_case`` (the mixins are imported
    above), and every attribute/method name was mangled.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # NOTE(review): the context-encoder archive is iterated twice in the
        # original source as well; structure preserved.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published DPR question-encoder weights.

    Restored from mangled source (class was a third binding of ``__lowercase``;
    the test method name is reconstructed — confirm against upstream).
    """

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 461 | 1 |
def A(separator, separated):
    """Join the strings in `separated` with `separator` between them.

    Raises Exception if any element is not a string. Restored from mangled
    source: both parameters were named ``UpperCAmelCase`` (a SyntaxError) while
    the body references ``separator`` and ``separated``.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # Drop the trailing separator appended after the last element.
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
def is_palindrome(n):
    """Return True if the decimal representation of `n` reads the same reversed."""
    return str(n) == str(n)[::-1]


def sum_reverse(n):
    """Return `n` plus its decimal-digit reversal (one Lychrel iteration)."""
    return int(n) + int(str(n)[::-1])


def solution(limit=10_000):
    """Project Euler 55: count Lychrel candidates below `limit`.

    A number is counted when 50 reverse-and-add iterations never reach a
    palindrome. Restored from mangled source: all three functions were defined
    as ``A`` (clobbering each other) while the bodies call them by the names
    above.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: no palindrome found within 50 iterations.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    """Return both roots of ``a*x**2 + b*x + c = 0``.

    Real roots are returned as floats; complex roots as complex numbers.
    Raises ValueError when ``a`` is zero. Restored from mangled source: both
    functions were defined as ``_snake_case`` while ``main`` calls
    ``quadratic_roots``.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    # cmath.sqrt handles a negative discriminant without raising.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
'''simple docstring'''
import functools
def SCREAMING_SNAKE_CASE__(days, costs) -> int:
    """Minimum cost to travel on every day in `days` using 1/7/30-day passes
    priced by `costs` (LeetCode 983 style, days numbered 1..365).

    Restored from mangled source: both parameters were named ``__A`` (a
    SyntaxError) while the body references ``days`` and ``costs``.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)  # O(1) membership tests inside the DP

    @functools.cache
    def dynamic_programming(index) -> int:
        # Cheapest cost to cover all travel days from `index` to year end.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 495 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# BUG FIX: the obfuscation bound the logger, the key mapping and the
# top-level key list to the same name ``a``, although every function below
# references them as ``logger``, ``MAPPING`` and ``TOP_LEVEL_KEYS``.
logger = logging.get_logger(__name__)

# fairseq parameter-path fragment -> transformers parameter path, used when
# porting wav2vec2 checkpoints (``*`` is a layer-index placeholder).
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}

# Target keys that live at the top level of the HF model (no ``wav2vec2.``
# prefix is prepended for these).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def lowercase_(_UpperCamelCase):
    """Read a label file and map each non-empty line's number to its first token.

    Used to build an ``id2label`` mapping for sequence-classification
    checkpoints: line ``i`` of the file names class ``i``.
    """
    # BUG FIX: the obfuscated original overwrote a single temporary three
    # times and never populated ``result``; the intended effect is
    # ``result[line_number] = first word of the line``.
    result = {}
    with open(_UpperCamelCase, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                result[line_number] = words[0]
    return result
# NOTE(review): the obfuscation collapsed all five distinct parameter names
# into ``_UpperCamelCase`` (a SyntaxError) and every assignment target into
# ``__lowercase``, so locals such as ``key``, ``hf_pointer``, ``value``,
# ``weight_type`` and ``hf_param_name`` are never bound.  Upstream this is
# ``set_recursively(hf_pointer, key, value, full_name, weight_type)`` from
# transformers' wav2vec2 conversion script -- confirm before restoring.
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Copy one fairseq tensor into the matching attribute of the HF model,
    validating the shape first and logging the assignment.'''
    # Walk the dotted key down to the target submodule/parameter.
    for attribute in key.split('''.''' ):
        __lowercase = getattr(_UpperCamelCase , _UpperCamelCase )
    # Special-case adapter parameters listed in PARAM_MAPPING.
    __lowercase = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_UpperCamelCase ):
            __lowercase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            __lowercase = '''param'''
    # Resolve the expected shape for the sanity check below.
    if weight_type is not None and weight_type != "param":
        __lowercase = getattr(_UpperCamelCase , _UpperCamelCase ).shape
    elif weight_type is not None and weight_type == "param":
        __lowercase = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            __lowercase = getattr(_UpperCamelCase , _UpperCamelCase )
        __lowercase = shape_pointer.shape
        # let's reduce dimension
        __lowercase = value[0]
    else:
        __lowercase = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    # Write the tensor into the matching slot of the HF parameter.
    if weight_type == "weight":
        __lowercase = value
    elif weight_type == "weight_g":
        __lowercase = value
    elif weight_type == "weight_v":
        __lowercase = value
    elif weight_type == "bias":
        __lowercase = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            __lowercase = getattr(_UpperCamelCase , _UpperCamelCase )
        __lowercase = value
    else:
        __lowercase = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
# NOTE(review): same obfuscation damage as above -- duplicate parameter names
# (SyntaxError) and the final dict assignment target lost.  Upstream this is
# ``rename_dict(key, value, full_name, weight_type, hf_dict)`` and the last
# statement stores into ``hf_dict[full_key]``; confirm before restoring.
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Record one fairseq tensor under its transformers key in a plain dict
    (used instead of mutating a model, e.g. for safetensors export).'''
    # Adapter parameters get their PARAM_MAPPING name and the ``param`` type.
    __lowercase = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_UpperCamelCase ):
            __lowercase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            __lowercase = '''param'''
    # Build the fully-qualified target key.
    if weight_type is not None and weight_type != "param":
        __lowercase = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        __lowercase = '''.'''.join([key, hf_param_name] )
    else:
        __lowercase = key
    # ``lm_head`` keeps the full tensor; everything else drops a leading dim.
    __lowercase = value if '''lm_head''' in full_key else value[0]
# fairseq adapter-parameter names -> transformers adapter submodule paths.
# BUG FIX: restored the name ``PARAM_MAPPING`` -- the obfuscated original
# bound this dict to ``a`` even though the functions above iterate
# ``PARAM_MAPPING.keys()``.
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
# NOTE(review): duplicate parameter names (SyntaxError) and collapsed call
# arguments -- upstream this is ``load_wav2vec2_layer(name, value, hf_model,
# hf_dict=None)``; the ``rename_dict``/``set_recursively`` calls originally
# forwarded (key, value, full_name/name, weight_type, hf_dict/hf_model).
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ):
    '''Route a single fairseq weight to the HF model (or dict); return True
    when some MAPPING entry claimed it.'''
    __lowercase = False
    for key, mapped_key in MAPPING.items():
        # Top-level keys keep their name; others live under ``wav2vec2.``.
        __lowercase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            __lowercase = True
            if "*" in mapped_key:
                # Substitute the concrete layer index for the ``*`` wildcard.
                __lowercase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
                __lowercase = mapped_key.replace('''*''' , _UpperCamelCase )
            # Classify which slot of the parameter is being written.
            if "weight_g" in name:
                __lowercase = '''weight_g'''
            elif "weight_v" in name:
                __lowercase = '''weight_v'''
            elif "bias" in name:
                __lowercase = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                __lowercase = '''weight'''
            else:
                __lowercase = None
            if hf_dict is not None:
                rename_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            else:
                set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            return is_used
    # NOTE(review): the source contains ``return is_used`` twice in a row;
    # only one (after the loop) exists upstream -- the duplicate is dead code.
    return is_used
# NOTE(review): duplicate parameter names (SyntaxError); upstream signature
# is ``recursively_load_weights(fairseq_model, hf_model, is_headless)``.
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Walk the fairseq state dict and load every tensor into the HF model,
    warning about any weights no rule claimed.'''
    __lowercase = []
    __lowercase = fairseq_model.state_dict()
    __lowercase = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        __lowercase = False
        if "conv_layers" in name:
            # Feature-extractor convolutions need positional bookkeeping.
            load_conv_layer(
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
            __lowercase = True
        else:
            __lowercase = load_wavaveca_layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        if not is_used:
            unused_weights.append(_UpperCamelCase )
    logger.warning(F'Unused weights: {unused_weights}' )
# NOTE(review): duplicate parameter names (SyntaxError); upstream signature
# is ``load_conv_layer(full_name, value, feature_extractor, unused_weights,
# use_group_norm)``.
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Load one feature-extractor conv/layer-norm tensor, checking that its
    shape matches the HF module before assignment.'''
    __lowercase = full_name.split('''conv_layers.''' )[-1]
    __lowercase = name.split('''.''' )
    # ``items[0]`` is the conv-layer index, ``items[1]`` selects conv (0)
    # vs. layer norm (2) within that layer.
    __lowercase = int(items[0] )
    __lowercase = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            __lowercase = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            __lowercase = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    # Layer norms exist on every layer (layer-norm style) or only on layer 0
    # (group-norm style).
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            __lowercase = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            __lowercase = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(_UpperCamelCase )
# NOTE(review): duplicate parameter names (SyntaxError) and collapsed locals;
# upstream signature is ``convert_wav2vec2_checkpoint(checkpoint_path,
# pytorch_dump_folder_path, config_path=None, dict_path=None,
# is_finetuned=True, is_seq_class=False)``.
@torch.no_grad()
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=False ):
    '''Convert a fairseq wav2vec2 checkpoint to a transformers checkpoint
    (CTC, sequence-classification or pretraining head) and save it.'''
    if config_path is not None:
        __lowercase = WavaVecaConfig.from_pretrained(_UpperCamelCase )
    else:
        __lowercase = WavaVecaConfig()
    if is_seq_class:
        # Sequence classification: id2label comes from the dict file.
        __lowercase = read_txt_into_dict(_UpperCamelCase )
        __lowercase = idalabel
        __lowercase = WavaVecaForSequenceClassification(_UpperCamelCase )
        __lowercase = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
        feature_extractor.save_pretrained(_UpperCamelCase )
    elif is_finetuned:
        if dict_path:
            __lowercase = Dictionary.load(_UpperCamelCase )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowercase = target_dict.pad_index
            __lowercase = target_dict.bos_index
            __lowercase = target_dict.eos_index
            __lowercase = len(target_dict.symbols )
            __lowercase = os.path.join(_UpperCamelCase , '''vocab.json''' )
            if not os.path.isdir(_UpperCamelCase ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) )
                return
            os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
            __lowercase = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __lowercase = 0
            __lowercase = 1
            with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(_UpperCamelCase , _UpperCamelCase )
            __lowercase = WavaVecaCTCTokenizer(
                _UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_UpperCamelCase , )
            __lowercase = True if config.feat_extract_norm == '''layer''' else False
            __lowercase = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
            __lowercase = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
            processor.save_pretrained(_UpperCamelCase )
        __lowercase = WavaVecaForCTC(_UpperCamelCase )
    else:
        __lowercase = WavaVecaForPreTraining(_UpperCamelCase )
    # Load the fairseq checkpoint itself.
    if is_finetuned or is_seq_class:
        __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        __lowercase = argparse.Namespace(task='''audio_pretraining''' )
        __lowercase = fairseq.tasks.setup_task(_UpperCamelCase )
        __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCamelCase )
    __lowercase = model[0].eval()
    recursively_load_weights(_UpperCamelCase , _UpperCamelCase , not is_finetuned )
    hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # BUG FIX: the obfuscated original bound the parser, the parsed args and
    # the derived flag to the single name ``a`` while the code below refers
    # to ``parser``, ``args`` and ``is_finetuned``; restored those names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    parser.add_argument(
        '--is_seq_class',
        action='store_true',
        help='Whether the model to convert is a fine-tuned sequence classification model or not',
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    # NOTE(review): the obfuscation renamed the conversion function above to
    # ``lowercase_``; this call still uses its upstream name.
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for the distance helpers below (the annotations are lazy via
# ``from __future__ import annotations``; only the right-hand sides run).
# NOTE(review): both aliases were obfuscated onto the single name ``a`` (the
# second shadows the first); kept as-is to preserve the module's names.
a : Optional[int] = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
# BUG FIX: restored ``np.float64`` from the corrupted ``np.floataa``, which
# raised AttributeError at import time.
a : Union[str, Any] = typing.Union[np.float64, int, float]  # noqa: UP007
def lowercase_(vector_1, vector_2):
    """Euclidean distance between two same-length vectors, via NumPy.

    BUG FIX: the obfuscated original declared both parameters with the same
    name (a SyntaxError, and a zero difference); restored distinct names.
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def lowercase_(vector_1, vector_2):
    """Euclidean distance between two same-length vectors, pure Python.

    BUG FIX: the obfuscated original unpacked each ``zip`` pair into one
    repeated name, making every squared difference zero; restored distinct
    names for the two coordinates.
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def lowercase_():
        '''Benchmark the two distance implementations with ``timeit``.'''
        from timeit import timeit

        print('''Without Numpy''')
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
        print('''With Numpy''')
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )

    # NOTE(review): this raises NameError as written -- the function above
    # was renamed to ``lowercase_`` by the obfuscation, and the timeit
    # statements still reference ``euclidean_distance*`` names that no
    # longer exist in this module either.
    benchmark()
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCAmelCase() -> int:
    """Entry point of the ``transformers-cli`` tool.

    Builds the top-level argument parser, registers every sub-command on a
    shared sub-parser, then dispatches to the selected command's ``func``
    factory and runs the resulting service.  Exits with status 1 (after
    printing help) when no sub-command was given.
    """
    root_parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = root_parser.add_subparsers(help='transformers-cli command helpers')

    # Register every available sub-command in the original order.
    for command in (
        ConvertCommand,
        DownloadCommand,
        EnvironmentCommand,
        RunCommand,
        ServeCommand,
        UserCommands,
        AddNewModelCommand,
        AddNewModelLikeCommand,
        LfsCommands,
        PTtoTFCommand,
    ):
        command.register_subcommand(commands_parser)

    # Dispatch: each sub-command stores a ``func`` factory on the namespace.
    parsed_args = root_parser.parse_args()
    if not hasattr(parsed_args, 'func'):
        root_parser.print_help()
        exit(1)

    service = parsed_args.func(parsed_args)
    service.run()


if __name__ == "__main__":
    main()
| 251 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)

# Pretrained-model id -> config URL map for BART.
# NOTE(review): the obfuscation bound both the logger and this dict to the
# same name ``_SCREAMING_SNAKE_CASE``, so the dict shadows the logger.
_SCREAMING_SNAKE_CASE : Dict = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
# NOTE(review): heavy obfuscation damage in this class -- the three class
# attributes (upstream ``model_type``, ``keys_to_ignore_at_inference``,
# ``attribute_map``) all share the name ``lowerCAmelCase_``, the ``__init__``
# parameters are all named ``a__`` (a SyntaxError), and every ``snake_case_``
# assignment below originally set a distinct ``self.<attr>``.
class _snake_case ( lowercase_ ):
    """Configuration for BART models (mirrors transformers' ``BartConfig``):
    stores vocabulary, encoder/decoder dimensions, dropout rates and special
    token ids, delegating shared handling to the base config class."""
    lowerCAmelCase_ : Dict = "bart"
    lowerCAmelCase_ : Optional[Any] = ["past_key_values"]
    lowerCAmelCase_ : Tuple = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , a__=50_265 , a__=1_024 , a__=12 , a__=4_096 , a__=16 , a__=12 , a__=4_096 , a__=16 , a__=0.0 , a__=0.0 , a__="gelu" , a__=1_024 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=0.0 , a__=False , a__=True , a__=3 , a__=1 , a__=0 , a__=2 , a__=True , a__=2 , a__=2 , **a__ , ) -> Union[str, Any]:
        '''Store all BART hyper-parameters and forward the shared token-id
        and generation options to the base ``PretrainedConfig``.'''
        snake_case_ = vocab_size
        snake_case_ = max_position_embeddings
        snake_case_ = d_model
        snake_case_ = encoder_ffn_dim
        snake_case_ = encoder_layers
        snake_case_ = encoder_attention_heads
        snake_case_ = decoder_ffn_dim
        snake_case_ = decoder_layers
        snake_case_ = decoder_attention_heads
        snake_case_ = dropout
        snake_case_ = attention_dropout
        snake_case_ = activation_dropout
        snake_case_ = activation_function
        snake_case_ = init_std
        snake_case_ = encoder_layerdrop
        snake_case_ = decoder_layerdrop
        snake_case_ = classifier_dropout
        snake_case_ = use_cache
        snake_case_ = encoder_layers
        snake_case_ = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , forced_eos_token_id=a__ , **a__ , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , a__ ):
            snake_case_ = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                "The config can simply be saved and uploaded again to be fixed." )
# NOTE(review): same obfuscation damage as the config class above -- every
# method is named ``lowerCAmelCase__``, every parameter is ``a__`` (duplicate
# names are a SyntaxError) and every ``snake_case_`` target originally bound
# a distinct local.  Upstream this mirrors transformers'
# ``BartOnnxConfig(OnnxSeq2SeqConfigWithPast)``.
class _snake_case ( lowercase_ ):
    """ONNX export configuration for BART: declares dynamic input/output axes
    and builds dummy inputs for the default/seq2seq-lm, causal-lm and
    sequence-classification export tasks (with or without past key values)."""
    @property
    def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis layout of the model inputs for the current task.'''
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a cache the decoder only feeds one new token per step.
                snake_case_ = {0: "batch"}
                snake_case_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                snake_case_ = {0: "batch", 1: "decoder_sequence"}
                snake_case_ = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(a__ , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            snake_case_ = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                snake_case_ , snake_case_ = self.num_layers
                for i in range(a__ ):
                    snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
                    snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
        else:
            snake_case_ = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis layout of the model outputs; past-key-value axes are
        appended when a cache is exported.'''
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = super().outputs
        else:
            snake_case_ = super(a__ , self ).outputs
            if self.use_past:
                snake_case_ , snake_case_ = self.num_layers
                for i in range(a__ ):
                    snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
                    snake_case_ = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
        '''Build dummy encoder+decoder inputs (and zero past key values when
        ``use_past``) for the default/seq2seq-lm export.'''
        snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        # Generate decoder inputs
        snake_case_ = seq_length if not self.use_past else 1
        snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        snake_case_ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        snake_case_ = dict(**a__ , **a__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            snake_case_ , snake_case_ = common_inputs["input_ids"].shape
            snake_case_ = common_inputs["decoder_input_ids"].shape[1]
            snake_case_ , snake_case_ = self.num_attention_heads
            snake_case_ = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            snake_case_ = decoder_seq_length + 3
            snake_case_ = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            snake_case_ = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(a__ , a__ )] , dim=1 )
            snake_case_ = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            snake_case_ , snake_case_ = self.num_layers
            snake_case_ = min(a__ , a__ )
            snake_case_ = max(a__ , a__ ) - min_num_layers
            snake_case_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(a__ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(a__ ),
                        torch.zeros(a__ ),
                        torch.zeros(a__ ),
                        torch.zeros(a__ ),
                    ) )
            # TODO: test this.
            snake_case_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(a__ , a__ ):
                common_inputs["past_key_values"].append((torch.zeros(a__ ), torch.zeros(a__ )) )
        return common_inputs

    def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
        '''Build dummy inputs for the causal-lm export, padding the attention
        mask and adding zero past key values when ``use_past``.'''
        snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            snake_case_ , snake_case_ = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            snake_case_ = seqlen + 2
            snake_case_ , snake_case_ = self.num_layers
            snake_case_ , snake_case_ = self.num_attention_heads
            snake_case_ = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            snake_case_ = common_inputs["attention_mask"].dtype
            snake_case_ = torch.cat(
                [common_inputs["attention_mask"], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
            snake_case_ = [
                (torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(a__ )
            ]
        return common_inputs

    def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
        '''Tokenize a fixed dummy sentence at the effective batch/sequence
        sizes (ONNX dynamic axes are replaced by fixed defaults).'''
        snake_case_ = compute_effective_axis_dimension(
            a__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        snake_case_ = tokenizer.num_special_tokens_to_add(a__ )
        snake_case_ = compute_effective_axis_dimension(
            a__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a__ )
        # Generate dummy inputs according to compute batch and sequence
        snake_case_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        snake_case_ = dict(tokenizer(a__ , return_tensors=a__ ) )
        return common_inputs

    def lowerCAmelCase__ ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ) -> Mapping[str, Any]:
        '''Dispatch dummy-input generation to the task-specific helper.'''
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        elif self.task == "causal-lm":
            snake_case_ = self._generate_dummy_inputs_for_causal_lm(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        else:
            snake_case_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        return common_inputs

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
        '''Flatten nested past-key-value tensors using the seq2seq scheme for
        encoder-decoder tasks and the decoder-only scheme otherwise.'''
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = super()._flatten_past_key_values_(a__ , a__ , a__ , a__ )
        else:
            snake_case_ = super(a__ , self )._flatten_past_key_values_(
                a__ , a__ , a__ , a__ )
| 400 | 0 |
"""Lazy import machinery for the MobileBERT model family.

BUG FIX: the obfuscated original rebound a single name ``a`` on every branch
and left ``_import_structure`` (referenced by ``_LazyModule`` at the bottom)
undefined; restored the conventional accumulation into ``_import_structure``
and the ``sys.modules`` assignment.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> public names always importable (config + slow tokenizer).
_import_structure = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}

# Fast tokenizer only when the ``tokenizers`` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']

# PyTorch modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilebert'] = [
        'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileBertForMaskedLM',
        'MobileBertForMultipleChoice',
        'MobileBertForNextSentencePrediction',
        'MobileBertForPreTraining',
        'MobileBertForQuestionAnswering',
        'MobileBertForSequenceClassification',
        'MobileBertForTokenClassification',
        'MobileBertLayer',
        'MobileBertModel',
        'MobileBertPreTrainedModel',
        'load_tf_weights_in_mobilebert',
    ]

# TensorFlow modeling classes only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
        'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileBertForMaskedLM',
        'TFMobileBertForMultipleChoice',
        'TFMobileBertForNextSentencePrediction',
        'TFMobileBertForPreTraining',
        'TFMobileBertForQuestionAnswering',
        'TFMobileBertForSequenceClassification',
        'TFMobileBertForTokenClassification',
        'TFMobileBertMainLayer',
        'TFMobileBertModel',
        'TFMobileBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime the module is lazy.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
'''Conditional import hub for the Stable Diffusion pipeline family: each
pipeline is imported only when its backend (torch/transformers/k-diffusion/
onnx/flax) is available, falling back to dummy objects otherwise.'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# NOTE(review): both annotated fields share the name ``UpperCAmelCase`` --
# only the second survives in ``__annotations__``, so this dataclass ends up
# with a single field (upstream these are ``images`` and
# ``nsfw_content_detected``).
@dataclass
class a_ ( snake_case ):
    """Output container for Stable Diffusion pipelines."""
    UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
    UpperCAmelCase : Optional[List[bool]]


# Torch + transformers pipelines (or dummies when unavailable).
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

# Pipelines that need transformers >= 4.25.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

# Pipelines that need transformers >= 4.26.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepthaImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPixaPixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline

# k-diffusion sampler support requires k-diffusion >= 0.0.12.
try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version('>=', '0.0.12')
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

# ONNX runtime pipelines.
try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

# Flax pipelines; only defined when both flax and transformers are present.
if is_transformers_available() and is_flax_available():
    import flax

    # NOTE(review): same field-name collapse as the torch dataclass above.
    @flax.struct.dataclass
    class a_ ( snake_case ):
        """Flax output container for Stable Diffusion pipelines."""
        UpperCAmelCase : np.ndarray
        UpperCAmelCase : List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 347 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def SCREAMING_SNAKE_CASE_(state_dict: dict) -> None:
    """Drop fairseq bookkeeping keys from ``state_dict`` in place.

    BUG FIX: the obfuscation renamed both the key list and the parameter
    away from the names the body used (``ignore_keys``/``state_dict``),
    making the function raise NameError; restored the intended bindings.
    Missing keys are ignored (``pop`` with a default).
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE_(s_dict: dict) -> None:
    """Rename fairseq state-dict keys to the transformers layout, in place.

    ``transformer_layers`` becomes ``layers`` and ``subsample`` becomes
    ``conv``.  BUG FIX: the obfuscated original popped the matching entries
    and discarded them instead of re-inserting under the new key.
    """
    # Snapshot the keys: we mutate the dict while iterating.
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def SCREAMING_SNAKE_CASE_(emb):
    """Build a bias-free ``nn.Linear`` that shares the embedding's weights.

    Used to tie the LM head to the decoder token embeddings.  BUG FIX: the
    obfuscation collapsed the shape unpack and the ``nn.Linear`` arguments
    onto one name; restored ``vocab_size``/``emb_size`` and the weight
    sharing via ``.data`` assignment.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (not copy) the embedding weight tensor with the linear head.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def SCREAMING_SNAKE_CASE_ ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert a fairseq Speech2Text checkpoint into a HF model directory.

    :param checkpoint_path: path to the fairseq ``.pt`` checkpoint.
    :param pytorch_dump_folder_path: output directory for ``save_pretrained``.
    :raises ValueError: if weights other than positional embeddings are missing.
    """
    # NOTE(review): the original signature declared two parameters with the
    # SAME name (a SyntaxError) and bound every local to `_A`, so `missing`,
    # `tie_embeds` and `lm_head_weights` below were undefined — restored here.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    # Keep the LM head weights before they are stripped by the cleanup below.
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    # NOTE(review): these two helper names are not bound in the visible file
    # (the helpers above are all named `SCREAMING_SNAKE_CASE_`) — confirm.
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = SpeechaTextForConditionalGeneration(config)
    # strict=False: positional-embedding buffers are allowed to be missing.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        # Tie the LM head to the decoder input embeddings.
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the original bound the parser to `UpperCAmelCase_` but then
    # used undefined names `parser`/`args` and called the nonexistent
    # `convert_fairseq_sat_checkpoint_to_tfms`; the converter defined above is
    # named `SCREAMING_SNAKE_CASE_` (its last definition in this module).
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    UpperCAmelCase_.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    UpperCAmelCase_.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = UpperCAmelCase_.parse_args()
    SCREAMING_SNAKE_CASE_(args.fairseq_path, args.pytorch_dump_folder_path)
| 2 |
import os
import pytest
from attr import dataclass
_a : int = 'us-east-1' # defaults region
@dataclass
class UpperCamelCase_ :
    """SageMaker integration-test environment description.

    NOTE(review): every field below is named ``A`` (only the last binding
    survives), ``hyperparameters`` on the final field and ``self.framework``
    in the properties are never bound, and all four properties share the name
    ``lowerCamelCase_`` so only the last one is visible — the original
    distinct names appear to have been collapsed by a renaming pass; confirm
    against the upstream SageMakerTestEnvironment before use.
    """
    A = 42
    A = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    A = {
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5500,
    }
    A = {**hyperparameters, '''max_steps''': 1000}
    @property
    def lowerCamelCase_ ( self ):
        """Metric regexes for the CloudWatch metric definitions (per framework)."""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # NOTE(review): the eval_accuracy/eval_loss regexes below look
            # swapped (accuracy matches "loss" and vice versa) — confirm.
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def lowerCamelCase_ ( self ):
        """Base job name used for the SageMaker training job."""
        return f'''{self.framework}-transfromers-test'''
    @property
    def lowerCamelCase_ ( self ):
        """Path to the per-framework test scripts."""
        return f'''./tests/sagemaker/scripts/{self.framework}'''
    @property
    def lowerCamelCase_ ( self ):
        """ECR training image URI for the selected framework."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def UpperCamelCase__ ( _A ):
    """Class-scoped fixture: attach the SageMaker test environment to the test class.

    :param _A: the pytest ``request`` object for the class under test.
    """
    # NOTE(review): the original read an undefined name `request` (the
    # parameter is `_A`) and discarded the environment in a throwaway local;
    # `SageMakerTestEnvironment` is also not defined in the visible file (the
    # dataclass above is named `UpperCamelCase_`) — confirm against upstream.
    _A.cls.env = SageMakerTestEnvironment(framework=_A.cls.framework )
| 479 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class _A ( snake_case ):
    """Abstract base class for all stopping criteria used during generation."""

    # NOTE(review): the decorator argument `SCREAMING_SNAKE_CASE_` is not
    # bound in the visible file (the docstring constant was renamed) — kept
    # as in the original; confirm against upstream.
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , input_ids , scores , **kwargs ):
        """Subclasses decide whether generation should stop; the original
        declared two parameters with the same name (a SyntaxError)."""
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class _A ( snake_case ):
    """Stops generation once the generated sequence reaches ``max_length`` tokens."""

    def __init__( self , max_length , max_position_embeddings = None ):
        # NOTE(review): the original declared duplicate parameter names (a
        # SyntaxError) and bound these values to locals instead of `self`,
        # so `__call__` would have raised AttributeError — restored here.
        # Target total length (prompt + generated tokens).
        self.max_length = max_length
        # Model positional-embedding limit; used only for the warning below.
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , input_ids , scores , **kwargs ):
        """Return True once the current length reaches ``max_length``."""
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class _A ( snake_case ):
    """Deprecated criterion: stops once ``start_length + max_new_tokens`` is reached."""

    def __init__( self , start_length , max_new_tokens ):
        # NOTE(review): the original declared duplicate parameter names (a
        # SyntaxError), passed an undefined name as the warning category, and
        # bound the attributes to locals instead of `self` — restored here.
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        # Effective stopping length.
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , input_ids , scores , **kwargs ):
        """Return True once the sequence reaches the effective max length."""
        return input_ids.shape[-1] >= self.max_length
class _A ( snake_case ):
    """Stops generation after ``max_time`` seconds have elapsed."""

    def __init__( self , max_time , initial_timestamp = None ):
        # NOTE(review): the original declared duplicate parameter names (a
        # SyntaxError) and bound these to locals instead of `self` — restored.
        # Time budget in seconds.
        self.max_time = max_time
        # Start counting now unless an explicit start timestamp is supplied.
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self , input_ids , scores , **kwargs ):
        """Return True once the elapsed wall-clock time exceeds the budget."""
        return time.time() - self.initial_timestamp > self.max_time
class _A ( snake_case ):
    """Container of stopping criteria; stops when ANY member criterion fires.

    NOTE(review): the base class ``snake_case`` is not bound in this file
    (upstream this is a ``list`` subclass), and each ``__call__`` below
    declares duplicate ``SCREAMING_SNAKE_CASE_`` parameters — a SyntaxError
    introduced by a renaming pass; confirm against upstream before running.
    """
    @add_start_docstrings(SCREAMING_SNAKE_CASE_ )
    def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
        """Return True if any contained criterion says generation should stop."""
        return any(criteria(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for criteria in self )
    @property
    def snake_case_ ( self ):
        """Return the max_length enforced by a contained criterion, if any."""
        # NOTE(review): both isinstance checks test the same undefined name;
        # upstream they distinguish MaxLengthCriteria and MaxNewTokensCriteria.
        for stopping_criterium in self:
            if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
                return stopping_criterium.max_length
            elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
                return stopping_criterium.max_length
        return None
def lowercase ( stopping_criteria , max_length ):
    """Return a deepcopy of ``stopping_criteria`` that enforces ``max_length``.

    Warns when the criteria already enforce a different max length; appends a
    ``MaxLengthCriteria`` when none is present.

    :param stopping_criteria: the criteria list (exposes ``max_length``).
    :param max_length: the max length requested by the caller.
    """
    # NOTE(review): the original declared two parameters both named `__A`
    # (a SyntaxError), collapsed every local to `snake_case`, and passed the
    # integer argument as the warning *category* — restored here.
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        # NOTE(review): `MaxLengthCriteria` is not bound under that name in
        # this file (the classes above are all named `_A`) — confirm.
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 315 |
def lowercase ( __A ):
    """Print the vertex count of the longest path in a DAG.

    Uses Kahn's topological ordering: ``long_dist[v]`` is the number of
    vertices on the longest path ending at ``v`` (every vertex starts at 1).

    :param __A: adjacency mapping {vertex: [successors]}, vertices 0..n-1.
    """
    # NOTE(review): the original bound indegree, the queue, the distance
    # table and the dequeued vertex all to one name `snake_case`, so it
    # raised NameError — the distinct locals are restored here.
    indegree = [0] * len(__A )
    queue = []
    long_dist = [1] * len(__A )
    for successors in __A.values():
        for node in successors:
            indegree[node] += 1
    # Seed the queue with all source vertices.
    for node in range(len(__A ) ):
        if indegree[node] == 0:
            queue.append(node )
    while queue:
        vertex = queue.pop(0 )
        for neighbour in __A[vertex]:
            indegree[neighbour] -= 1
            # Relax: extend the longest path ending at `vertex`.
            if long_dist[vertex] + 1 > long_dist[neighbour]:
                long_dist[neighbour] = long_dist[vertex] + 1
            if indegree[neighbour] == 0:
                queue.append(neighbour )
    print(max(long_dist ) )
# Adjacency list of Graph
__lowercase : Any = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# NOTE(review): neither `longest_distance` nor `graph` is defined in this
# file — the function above is named `lowercase` and the dict is bound to
# `__lowercase` — so this call raises NameError; it presumably should read
# `lowercase(__lowercase)`. Confirm against upstream before running.
longest_distance(graph)
| 315 | 1 |
def __lowercase ( input_str: str , use_pascal: bool = False ):
    """Convert a snake_case string to camelCase (or PascalCase).

    :param input_str: the snake_case string to convert.
    :param use_pascal: when True, capitalize the first word too (PascalCase).
    :return: the converted string.
    :raises ValueError: if the arguments are not a str / bool respectively.
    """
    # NOTE(review): the original declared both parameters with the same name
    # (a SyntaxError) — restored to the names used in its error messages.
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split('_' )
    # PascalCase capitalizes every word; camelCase keeps the first as-is.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    # Skip empty segments (consecutive/trailing underscores) instead of
    # crashing on word[0].
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize if word]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 417 | import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Helper that builds small ViTMAE configs/inputs for the unit tests.

    NOTE(review): every method parameter below is named ``snake_case`` —
    duplicate argument names are a SyntaxError — and the ``__init__`` body
    binds each value to a throwaway local ``UpperCamelCase_`` instead of
    ``self.<attr>``, while later methods read ``self.batch_size`` etc.; the
    ``self.`` prefixes and distinct parameter names appear to have been
    stripped by a renaming pass. Confirm against the upstream
    ``ViTMAEModelTester`` before running.
    """
    def __init__( self : List[str] , snake_case : int , snake_case : Optional[int]=1_3 , snake_case : List[str]=3_0 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=3 , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=3_2 , snake_case : int=5 , snake_case : int=4 , snake_case : List[str]=3_7 , snake_case : Union[str, Any]="gelu" , snake_case : int=0.1 , snake_case : Dict=0.1 , snake_case : Any=1_0 , snake_case : Any=0.02 , snake_case : int=3 , snake_case : int=0.6 , snake_case : str=None , ) -> Any:
        """Store the hyper-parameters used to build tiny ViTMAE test models."""
        UpperCamelCase_ : List[Any] = parent
        UpperCamelCase_ : Optional[int] = batch_size
        UpperCamelCase_ : Optional[Any] = image_size
        UpperCamelCase_ : Optional[int] = patch_size
        UpperCamelCase_ : List[str] = num_channels
        UpperCamelCase_ : Optional[int] = is_training
        UpperCamelCase_ : Tuple = use_labels
        UpperCamelCase_ : str = hidden_size
        UpperCamelCase_ : Union[str, Any] = num_hidden_layers
        UpperCamelCase_ : int = num_attention_heads
        UpperCamelCase_ : Optional[Any] = intermediate_size
        UpperCamelCase_ : Optional[int] = hidden_act
        UpperCamelCase_ : int = hidden_dropout_prob
        UpperCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
        UpperCamelCase_ : List[str] = type_sequence_label_size
        UpperCamelCase_ : List[str] = initializer_range
        UpperCamelCase_ : Union[str, Any] = mask_ratio
        UpperCamelCase_ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        UpperCamelCase_ : int = (image_size // patch_size) ** 2
        UpperCamelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
        """Build a config plus random pixel inputs (and labels when enabled)."""
        UpperCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase_ : Tuple = None
        if self.use_labels:
            UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase_ : List[Any] = self.get_config()
        return config, pixel_values, labels
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
        """Return a ViTMAEConfig built from the stored hyper-parameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Dict , snake_case : Optional[int] , snake_case : str ) -> List[Any]:
        """Run the base ViTMAEModel and check the hidden-state output shape."""
        UpperCamelCase_ : Optional[Any] = ViTMAEModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        UpperCamelCase_ : Any = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Tuple , snake_case : Tuple ) -> Optional[Any]:
        """Run ViTMAEForPreTraining and check logits shapes (incl. greyscale)."""
        UpperCamelCase_ : List[str] = ViTMAEForPreTraining(snake_case )
        model.to(snake_case )
        model.eval()
        UpperCamelCase_ : Optional[int] = model(snake_case )
        UpperCamelCase_ : List[str] = (self.image_size // self.patch_size) ** 2
        UpperCamelCase_ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        UpperCamelCase_ : List[str] = 1
        UpperCamelCase_ : Tuple = ViTMAEForPreTraining(snake_case )
        model.to(snake_case )
        model.eval()
        UpperCamelCase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase_ : Optional[Any] = model(snake_case )
        UpperCamelCase_ : Optional[Any] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = config_and_inputs
        UpperCamelCase_ : List[str] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """ViTMAE model test-suite (common model + pipeline mixin tests).

    NOTE(review): the mixin base classes are referenced as the undefined name
    ``snake_case_``, all class attributes below share the name ``lowercase``
    (only the last survives), every test method is named
    ``SCREAMING_SNAKE_CASE__`` (so only the last is collected), and several
    signatures repeat ``snake_case`` parameters — a duplicate-argument
    SyntaxError. This appears to be the product of a renaming pass; confirm
    against the upstream ViTMAEModelTest before running.
    """
    lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
        """Set up the model tester and the config tester."""
        UpperCamelCase_ : Tuple = ViTMAEModelTester(self )
        UpperCamelCase_ : List[str] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
        """Skipped: ViTMAE has no inputs_embeds."""
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
        """Check input/output embedding accessors on every model class."""
        UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ : List[str] = model_class(snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCamelCase_ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
        """Check the forward signature starts with `pixel_values`."""
        UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ : Optional[Any] = model_class(snake_case )
            UpperCamelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase_ : Optional[int] = [*signature.parameters.keys()]
            UpperCamelCase_ : int = ['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
        """Exercise the base model through the tester."""
        UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
        """Exercise the pre-training head through the tester."""
        UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple ) -> int:
        """PT/TF equivalence override: feed both frameworks the same noise mask."""
        np.random.seed(2 )
        UpperCamelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        UpperCamelCase_ : Optional[Any] = torch.from_numpy(snake_case )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        UpperCamelCase_ : Optional[int] = pt_noise
        super().check_pt_tf_models(snake_case , snake_case , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
        """Save/reload round-trip with a reproducible random mask."""
        UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ : str = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCamelCase_ : Any = outputs[0].cpu().numpy()
            UpperCamelCase_ : Optional[Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(snake_case )
                UpperCamelCase_ : Any = model_class.from_pretrained(snake_case )
                model.to(snake_case )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    UpperCamelCase_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
                # Make sure we don't have nans
                UpperCamelCase_ : int = after_outputs[0].cpu().numpy()
                UpperCamelCase_ : Union[str, Any] = 0
                UpperCamelCase_ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(snake_case , 1e-5 )
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
        """Skipped: forward pass is stochastic."""
        pass
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
        """Skipped: forward pass is stochastic."""
        pass
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
        """Skipped: forward pass is stochastic."""
        pass
    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
        """Skipped: forward pass is stochastic."""
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
        """Skipped pending smaller common-test model."""
        pass
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_ : Tuple = ViTMAEModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def __lowercase ( ):
    """Load the standard COCO fixture image used by the integration tests."""
    # The original bound the image to a throwaway name and returned the
    # undefined name `image`; return the opened image instead.
    img = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return img
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run pretrained ViTMAE on a real image.

    NOTE(review): many result bindings below go to the throwaway local
    ``UpperCamelCase_`` while later lines read names like ``model``,
    ``outputs`` and ``expected_slice`` — the distinct local names appear to
    have been collapsed by a renaming pass; confirm against upstream.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
        """Return the default ViT image processor (or None without vision deps)."""
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
        """Forward a fixture image through the pretrained model and check logits."""
        np.random.seed(2 )
        UpperCamelCase_ : Dict = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
        UpperCamelCase_ : str = self.default_image_processor
        UpperCamelCase_ : int = prepare_img()
        UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        UpperCamelCase_ : Optional[Any] = ViTMAEConfig()
        UpperCamelCase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        UpperCamelCase_ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            UpperCamelCase_ : Tuple = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
        # verify the logits
        UpperCamelCase_ : Optional[int] = torch.Size((1, 1_9_6, 7_6_8) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCamelCase_ : List[Any] = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
| 417 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): both assignments below bind the SAME name `lowerCAmelCase`,
# so the logger from the first line is immediately shadowed by the key
# mapping; the functions in this module reference `logger` and `MAPPING`,
# neither of which is ever bound — the original distinct names were
# apparently collapsed by a renaming pass.
lowerCAmelCase : int =logging.get_logger(__name__)
# fairseq-parameter-name -> HF-parameter-name mapping for Hubert conversion.
lowerCAmelCase : Dict ={
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
def _UpperCAmelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` into the sub-module of ``hf_pointer`` addressed by dotted ``key``.

    :param hf_pointer: root HF module to descend into.
    :param key: dotted attribute path (e.g. ``"encoder.layers.0.attention.k_proj"``).
    :param value: tensor to copy in.
    :param full_name: original fairseq name, used for error/log messages.
    :param weight_type: one of ``weight``/``weight_g``/``weight_v``/``bias``/None.
    """
    # NOTE(review): the original declared five parameters with the same name
    # (a SyntaxError) and bound every result to throwaway locals; the
    # conventional fairseq->HF copy logic is restored here.
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Validate the shape before clobbering the target tensor.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
    """Walk a fairseq state dict and route each tensor into the HF model.

    NOTE(review): the signature declares three parameters with the same name
    (a SyntaxError) and the body reads `fairseq_model`, `hf_model`,
    `is_finetuned`, `MAPPING`, `set_recursively`, `load_conv_layer` and
    `logger`, none of which is bound in this file — the original distinct
    names were apparently collapsed by a renaming pass; confirm upstream.
    """
    lowerCAmelCase : Union[str, Any] = []
    lowerCAmelCase : Union[str, Any] = fairseq_model.state_dict()
    lowerCAmelCase : List[str] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCAmelCase : Dict = False
        if "conv_layers" in name:
            # Conv feature-extractor tensors go through the dedicated loader.
            load_conv_layer(
                A_ ,A_ ,A_ ,A_ ,hf_model.config.feat_extract_norm == """group""" ,)
            lowerCAmelCase : Optional[int] = True
        else:
            for key, mapped_key in MAPPING.items():
                lowerCAmelCase : Any = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    lowerCAmelCase : Union[str, Any] = True
                    if "*" in mapped_key:
                        # Substitute the layer index captured from the name.
                        lowerCAmelCase : int = name.split(A_ )[0].split(""".""" )[-2]
                        lowerCAmelCase : Union[str, Any] = mapped_key.replace("""*""" ,A_ )
                    if "weight_g" in name:
                        lowerCAmelCase : str = """weight_g"""
                    elif "weight_v" in name:
                        lowerCAmelCase : str = """weight_v"""
                    elif "weight" in name:
                        lowerCAmelCase : List[Any] = """weight"""
                    elif "bias" in name:
                        lowerCAmelCase : str = """bias"""
                    else:
                        lowerCAmelCase : Union[str, Any] = None
                    set_recursively(A_ ,A_ ,A_ ,A_ ,A_ )
                continue
        if not is_used:
            unused_weights.append(A_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Route one fairseq conv-feature-extractor tensor into the HF extractor.

    :param full_name: full fairseq parameter name (contains ``conv_layers.``).
    :param value: tensor to copy in.
    :param feature_extractor: HF feature extractor holding ``conv_layers``.
    :param unused_weights: list collecting names that could not be placed.
    :param use_group_norm: whether the extractor uses group norm on layer 0.
    """
    # NOTE(review): the original declared five parameters with the same name
    # (a SyntaxError) and bound every result to throwaway locals — restored.
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the layer norm (group norm only exists on layer 0).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ):
    """Convert a fairseq Hubert checkpoint into a HF model directory.

    NOTE(review): the signature declares five parameters with the same name
    (a SyntaxError); the body reads `config_path`, `is_finetuned`,
    `dict_path`, `checkpoint_path`, `target_dict`, `config`, `processor`,
    `recursively_load_weights`, `hf_wavavec` and `logger`, none of which is
    bound in this file, and every call passes the undefined placeholder
    `A_` — the original distinct names were apparently collapsed by a
    renaming pass; confirm against the upstream Hubert conversion script.
    """
    if config_path is not None:
        lowerCAmelCase : str = HubertConfig.from_pretrained(A_ )
    else:
        lowerCAmelCase : Dict = HubertConfig()
    if is_finetuned:
        if dict_path:
            lowerCAmelCase : List[str] = Dictionary.load(A_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCAmelCase : Tuple = target_dict.pad_index
            lowerCAmelCase : Optional[Any] = target_dict.bos_index
            lowerCAmelCase : List[str] = target_dict.eos_index
            lowerCAmelCase : Optional[Any] = len(target_dict.symbols )
            lowerCAmelCase : Optional[int] = os.path.join(A_ ,"""vocab.json""" )
            if not os.path.isdir(A_ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A_ ) )
                return
            os.makedirs(A_ ,exist_ok=A_ )
            with open(A_ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices ,A_ )
            lowerCAmelCase : str = WavaVecaCTCTokenizer(
                A_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=A_ ,)
            lowerCAmelCase : int = True if config.feat_extract_norm == """layer""" else False
            lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
                feature_size=1 ,sampling_rate=1_6_0_0_0 ,padding_value=0 ,do_normalize=A_ ,return_attention_mask=A_ ,)
            lowerCAmelCase : Tuple = WavaVecaProcessor(feature_extractor=A_ ,tokenizer=A_ )
            processor.save_pretrained(A_ )
        lowerCAmelCase : List[Any] = HubertForCTC(A_ )
    else:
        lowerCAmelCase : Any = HubertModel(A_ )
    if is_finetuned:
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    lowerCAmelCase : Any = model[0].eval()
    recursively_load_weights(A_ ,A_ ,A_ )
    hf_wavavec.save_pretrained(A_ )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser to `lowerCAmelCase` but then
    # used undefined names `parser`/`args` and called the nonexistent
    # `convert_hubert_checkpoint`; the converter defined above is named
    # `_UpperCAmelCase` (its last definition in this module).
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    _UpperCAmelCase(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): the two assignments below bind `lowerCAmelCase` twice, yet the
# last line reads `logger` and `stream_handler`, neither of which is ever
# bound — the original distinct names were apparently collapsed; as written
# this raises NameError at import time.
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a(TestCasePlus):
    """End-to-end smoke tests for the RAG fine-tuning example (finetune_rag.py).

    Each test writes a tiny dummy seq2seq dataset, launches the example script
    in a subprocess, and asserts on the exact-match metric it writes to
    ``metrics.json``.

    NOTE(review): the original base class name was the undefined
    ``snake_case_``; ``TestCasePlus`` (imported above) is the class that
    provides ``get_auto_remove_tmp_dir``/``get_env`` used below — confirm.
    Method names are restored from their own call sites
    (``self._create_dummy_data`` / ``self._run_finetune``).
    """

    def _create_dummy_data(self, data_dir):
        """Write minimal train/val/test ``.source``/``.target`` files under *data_dir*."""
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        """Run finetune_rag.py in a subprocess and return the parsed metrics.json.

        Args:
            gpus: number of GPUs to pass to the script; 0 selects ddp_cpu.
            distributed_retriever: retriever backend ("pytorch" or "ray").
        """
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        # Ray manages the worker processes itself, so a single trainer GPU is used here.
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 693 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown table.

    Args:
        input_json_file: path to a JSON mapping ``benchmark_name -> {metric ->
            {"new": float, "old": float?, "diff": float?}}``.
        output_md_file: path the Markdown report is written to.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        # Only the file name, not the full path, is shown in the heading.
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric entries are rendered literally as "None".
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    # Fix: both positional arguments were bound to the same throwaway name
    # (`lowercase__`), leaving the names used in the call undefined.
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 263 |
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of *array* summing to *target*.

    Plain exponential recursion (no memoization). *n* is accepted for
    signature parity with the other implementations and is unused here.

    Fix: the mangled signature repeated the parameter name, which is a
    SyntaxError, and the body's `array`/`target` were left unbound.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way: the empty combination
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to *target*, memoized top-down.

    ``dp_array[t]`` caches the count for sub-target ``t`` (-1 = not computed).
    *n* is accepted for signature parity and unused. Fix: duplicate parameter
    names (SyntaxError) and unbound `target`/`dp_array` in the body.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to *target*, bottom-up DP.

    Args:
        n: number of usable elements of *array* (``array[:n]`` are considered).
        array: the candidate numbers.
        target: the sum to reach.

    Fix: duplicate parameter names (SyntaxError); the loop bound and DP table
    references were left unbound by the mangled signature.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # base case: one way to make 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the three constants were all bound to one throwaway name
    # (`lowercase__`), leaving `n`, `target` and `array` undefined below.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 263 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, using 6k +/- 1 trial division.

    Raises:
        AssertionError: if *number* is not a non-negative int (deliberate
            input validation kept from the original).

    Fix: the function was defined under the mangled name ``lowerCAmelCase_``
    while the unit tests below call it as ``is_prime``.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class UpperCamelCase(unittest.TestCase):
    """Unit tests for the is_prime helper defined above.

    Fixes: both methods were named ``A_`` (the second shadowed the first and
    unittest never discovers non-``test_*`` names), and ``assertRaises`` was
    given the undefined name ``A__`` — the validator raises AssertionError.
    """

    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # Negative input violates is_prime's input-validation assert.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
    # Run the prime-number unit tests when this file is executed directly.
    unittest.main()
| 700 | import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# Filename under which scheduler configurations are saved/loaded (used as the
# mixin's `config_name` below).
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class UpperCamelCase ( __a ):
    # NOTE(review): the base name `__a` is undefined in this module (presumably an
    # Enum base upstream), and all five members are bound to the same identifier
    # `a__`, so only the last assignment (5) survives. Member names cannot be
    # reconstructed from this file — confirm against the upstream source. Code is
    # preserved as-is; only comments were added.
    a__ :Any = 1
    a__ :Union[str, Any] = 2
    a__ :Union[str, Any] = 3
    a__ :int = 4
    a__ :int = 5
@dataclass
class UpperCamelCase ( __a ):
    # NOTE(review): base `__a` is undefined here; this looks like a scheduler
    # step-output dataclass whose single array field presumably holds the
    # previous sample — confirm the intended field name upstream. Preserved
    # as-is; only comments were added.
    a__ :jnp.ndarray
class UpperCamelCase:
    """Mixin providing save/load helpers for Flax schedulers.

    Fixes applied: the two method signatures repeated the parameter name
    ``__UpperCamelCase`` (a SyntaxError); the class attributes were all bound
    to one name ``a__``; and the config filename referenced the undefined
    ``SCHEDULER_CONFIG_NAME`` instead of this module's constant. Attribute and
    method names are restored from in-body usage (``cls._compatibles``,
    ``self._get_compatibles()``, ``save_directory=``/``push_to_hub=``) and the
    conventional mixin API; confirm against the upstream mixin.
    """

    config_name = SCREAMING_SNAKE_CASE  # "scheduler_config.json"
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved configuration.

        Returns ``(scheduler, state)`` or ``(scheduler, state, unused_kwargs)``
        when *return_unused_kwargs* is True.
        """
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # Flax schedulers carry their mutable state separately; build it here.
        # NOTE(review): `state` is only bound inside this branch — as upstream.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Serialize this scheduler's configuration into *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler's saved configs are compatible with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package module.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast *x* to *shape* by appending trailing singleton axes.

    Unlike plain ``jnp.broadcast_to`` (which aligns from the right), this pads
    ``x``'s shape on the right with 1s first, so existing axes align from the
    left. Fix: duplicate parameter names (SyntaxError); the name is restored
    from its call sites below (``broadcast_to_shape_from_left``).
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int]=0.999 , _SCREAMING_SNAKE_CASE : List[str]=jnp.floataa ):
def alpha_bar(_SCREAMING_SNAKE_CASE : int ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
UpperCamelCase_ : List[str] = []
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ : Optional[Any] = i / num_diffusion_timesteps
UpperCamelCase_ : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_SCREAMING_SNAKE_CASE ) / alpha_bar(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return jnp.array(_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class UpperCamelCase:
    """Immutable diffusion-schedule state shared by the Flax schedulers.

    Field names are restored from the keyword construction in ``A_`` below;
    the factory's parameter name is restored from its own body
    (``scheduler.config`` / ``scheduler.dtype``). The mangled fields were all
    bound to ``a__`` (only the last survived), and the factory referenced
    names its signature no longer bound.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def A_(cls, scheduler):
        """Build the common state from *scheduler*'s config (beta schedule etc.).

        NOTE(review): upstream this factory is conventionally named ``create``;
        the obfuscated name ``A_`` is kept since no call site is visible here.
        """
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Gather sqrt(alpha_cumprod) and sqrt(1 - alpha_cumprod) at *timesteps*,
    broadcast to the shape of *original_samples*.

    *noise* is accepted for signature parity with the add-noise/velocity
    helpers and is unused here. Fixes: duplicate parameter names
    (SyntaxError); the function name is restored from its two call sites
    below (``get_sqrt_alpha_prod``).
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Forward-diffuse *original_samples* with *noise* at the given *timesteps*.

    Computes ``sqrt(a_bar_t) * x0 + sqrt(1 - a_bar_t) * noise``. Fixes:
    duplicate parameter names (SyntaxError); renamed from the colliding
    ``lowerCAmelCase_`` following the conventional ``add_noise_common`` name —
    NOTE(review): confirm no external caller uses the mangled name.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """Compute the v-prediction target for *sample*/*noise* at *timesteps*.

    Computes ``sqrt(a_bar_t) * noise - sqrt(1 - a_bar_t) * sample``. Fixes:
    duplicate parameter names (SyntaxError); renamed from the colliding
    ``lowerCAmelCase_`` following the conventional ``get_velocity_common``
    name — NOTE(review): confirm no external caller uses the mangled name.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 138 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase_(ProcessorMixin):
    """Processor bundling a BLIP image processor with a main tokenizer and a
    Q-Former tokenizer (InstructBLIP-style).

    Fixes applied: the base class name ``UpperCamelCase__`` was undefined
    (``ProcessorMixin`` is the imported base); ``__init__`` and ``__call__``
    repeated parameter names (SyntaxErrors); the three class attributes were
    all bound to one name. Parameter names are restored from in-body usage
    (``qformer_tokenizer``, the ``save_directory`` f-string) and the
    ProcessorMixin attribute contract — confirm against upstream.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize *text* with both tokenizers and preprocess *images*.

        The Q-Former encodings are stored under ``qformer_input_ids`` /
        ``qformer_attention_mask`` so they do not clash with the main
        tokenizer's keys.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the main tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the main tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save all components; the Q-Former tokenizer goes to a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load all components, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ~*num_class_images* regularization images for *class_prompt*
    via LAION kNN retrieval into ``{class_data_dir}/images``, writing matching
    caption/url/path manifests.

    Fixes: the signature repeated ``__UpperCamelCase`` (SyntaxError) and most
    locals were bound to one throwaway name; the function name is restored
    from the ``retrieve(...)`` call in the ``__main__`` block below.
    """
    factor = 1.5
    # Over-query so there are enough candidates to survive failed downloads.
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return  # already populated

    # Grow the query size until enough candidates are returned (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload is a decodable image before saving.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken URL/image and move on.
                continue
    return
def parse_args():
    """Build and parse the CLI arguments for the retrieval script.

    Fixes: the original referenced the undefined ``__UpperCamelCase`` for
    ``add_help``/``required``/``type`` (the function takes no parameters);
    the name is restored from the ``parse_args()`` call in ``__main__``.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # Fix: the parsed namespace was assigned to `__lowerCamelCase` while the
    # call below reads `args`.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 490 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework key used for `return_tensors=` in the tests below.
# Fix: the value was assigned to `_lowerCamelCase` while the tests read the
# name `FRAMEWORK`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class lowercase ( a , unittest.TestCase ):
    """Test suite for the byte-level ByT5 tokenizer.

    NOTE(review): this block is preserved byte-for-byte apart from comments and
    docstrings. Defects to confirm/fix upstream:
    - the base name `a` is undefined in this module (the import list suggests
      TokenizerTesterMixin was intended);
    - every method is named `__snake_case`, so each definition shadows the
      previous one and only the last survives on the class;
    - several bodies assign to `SCREAMING_SNAKE_CASE` but then read other names
      (`tokenizer`, `toks`, `output_txt`, ...), which raises NameError at run
      time, and `self.ta_base_tokenizer` has no matching attribute definition;
    - the sequence-builder method repeats the parameter name `_UpperCamelCase`,
      which is a SyntaxError in Python.
    """

    lowercase__ : Union[str, Any] = ByTaTokenizer
    lowercase__ : str = False

    # setUp: persist a fresh tokenizer into the temporary test directory.
    def __snake_case( self : Tuple ) -> int:
        '''Save a default ByT5 tokenizer under self.tmpdirname before each test.'''
        super().setUp()
        SCREAMING_SNAKE_CASE = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __snake_case( self : int ) -> Optional[int]:
        '''Reference tokenizer loaded from the google/byt5-small checkpoint.'''
        return ByTaTokenizer.from_pretrained("google/byt5-small" )

    def __snake_case( self : List[str] , **_UpperCamelCase : str ) -> ByTaTokenizer:
        '''Reload the tokenizer saved in setUp, forwarding keyword overrides.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )

    # Helper that builds a clean (text, ids) pair of decodable ASCII tokens.
    # NOTE(review): the repeated `_UpperCamelCase` parameters are a SyntaxError.
    def __snake_case( self : int , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Optional[int]=20 , _UpperCamelCase : Dict=5 ) -> Tuple[str, list]:
        '''Build a round-trippable (text, token ids) pair within the length bounds.'''
        SCREAMING_SNAKE_CASE = []
        for i in range(len(_UpperCamelCase ) ):
            try:
                SCREAMING_SNAKE_CASE = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCamelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : re.match(R"^[ a-zA-Z]+$" , t[1] ) , _UpperCamelCase ) )
        SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCamelCase ) , _UpperCamelCase ) )
        if max_length is not None and len(_UpperCamelCase ) > max_length:
            SCREAMING_SNAKE_CASE = toks[:max_length]
        if min_length is not None and len(_UpperCamelCase ) < min_length and len(_UpperCamelCase ) > 0:
            while len(_UpperCamelCase ) < min_length:
                SCREAMING_SNAKE_CASE = toks + toks
        # toks_str = [t[1] for t in toks]
        SCREAMING_SNAKE_CASE = [t[0] for t in toks]
        # Ensure consistency
        SCREAMING_SNAKE_CASE = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
        if " " not in output_txt and len(_UpperCamelCase ) > 1:
            SCREAMING_SNAKE_CASE = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCamelCase )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCamelCase )
            )
        if with_prefix_space:
            SCREAMING_SNAKE_CASE = " " + output_txt
        SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
        return output_txt, output_ids

    # An explicit trailing </s> must tokenize identically to the implicit EOS.
    def __snake_case( self : List[str] ) -> Optional[int]:
        '''Batches with and without an explicit </s> suffix must produce equal ids.'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        SCREAMING_SNAKE_CASE = tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )

    # Multibyte characters must round-trip through the byte-level vocabulary.
    def __snake_case( self : int ) -> List[Any]:
        '''Encode/decode round-trips for non-ASCII text (byte-level ids + EOS).'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = "Unicode €."
        SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase )
        SCREAMING_SNAKE_CASE = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , _UpperCamelCase )
        # decoding
        SCREAMING_SNAKE_CASE = tokenizer.decode(_UpperCamelCase )
        self.assertEqual(_UpperCamelCase , "Unicode €.</s>" )
        SCREAMING_SNAKE_CASE = tokenizer("e è é ê ë" )
        SCREAMING_SNAKE_CASE = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , _UpperCamelCase )
        # decoding
        SCREAMING_SNAKE_CASE = tokenizer.decode(_UpperCamelCase )
        self.assertEqual(_UpperCamelCase , "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )

    # Padded batching must produce rectangular tensors with expected ids.
    def __snake_case( self : Optional[Any] ) -> Any:
        '''Batch-encode with padding and check shapes plus the first row's ids.'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        SCREAMING_SNAKE_CASE = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
        self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
        if FRAMEWORK != "jax":
            SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] )
        else:
            SCREAMING_SNAKE_CASE = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    # Plain __call__ must not emit decoder-side keys.
    def __snake_case( self : str ) -> Dict:
        '''Output contains input_ids/attention_mask but no decoder_* keys.'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , _UpperCamelCase )
        self.assertIn("attention_mask" , _UpperCamelCase )
        self.assertNotIn("decoder_input_ids" , _UpperCamelCase )
        self.assertNotIn("decoder_attention_mask" , _UpperCamelCase )

    # Target-side tokenization must honor max_length padding/truncation.
    def __snake_case( self : Optional[Any] ) -> Optional[int]:
        '''text_target with padding="max_length" yields fixed-width labels.'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = [
            "Summary of the text.",
            "Another summary.",
        ]
        SCREAMING_SNAKE_CASE = tokenizer(
            text_target=_UpperCamelCase , max_length=32 , padding="max_length" , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase )
        self.assertEqual(32 , targets["input_ids"].shape[1] )

    # Joint source/target call must fill both input_ids and labels.
    def __snake_case( self : List[Any] ) -> List[Any]:
        '''Encode source text and text_target together; check both id lists.'''
        SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE = ["A long paragraph for summarization. </s>"]
        SCREAMING_SNAKE_CASE = ["Summary of the text. </s>"]
        # fmt: off
        SCREAMING_SNAKE_CASE = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        SCREAMING_SNAKE_CASE = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase , text_target=_UpperCamelCase )
        self.assertEqual(_UpperCamelCase , batch["input_ids"][0] )
        self.assertEqual(_UpperCamelCase , batch["labels"][0] )

    # Save/reload round-trips, including added tokens and model_max_length.
    def __snake_case( self : Tuple ) -> Tuple:
        '''Tokenizers must encode identically after save_pretrained/from_pretrained.'''
        SCREAMING_SNAKE_CASE = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        SCREAMING_SNAKE_CASE = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
                SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                tokenizer.save_pretrained(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
                shutil.rmtree(_UpperCamelCase )
        SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                tokenizer.save_pretrained(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = after_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
                self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(_UpperCamelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_UpperCamelCase )

    # from_pretrained must honor additional_special_tokens from the saved
    # special_tokens_map.json / tokenizer_config.json and from kwargs.
    def __snake_case( self : str ) -> Union[str, Any]:
        '''Edited special-token config files are respected on reload.'''
        SCREAMING_SNAKE_CASE = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_UpperCamelCase )
                with open(os.path.join(_UpperCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    SCREAMING_SNAKE_CASE = json.load(_UpperCamelCase )
                with open(os.path.join(_UpperCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    SCREAMING_SNAKE_CASE = json.load(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = [F"<extra_id_{i}>" for i in range(125 )]
                SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(_UpperCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(_UpperCamelCase , _UpperCamelCase )
                with open(os.path.join(_UpperCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(_UpperCamelCase , _UpperCamelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
                    _UpperCamelCase , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=_UpperCamelCase )]
                SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
                    _UpperCamelCase , additional_special_tokens=_UpperCamelCase , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    # Byte id 255 has no printable decoding and must map to the empty string.
    def __snake_case( self : Optional[Any] ) -> List[str]:
        '''decode([255]) returns "" after a save/reload round-trip.'''
        SCREAMING_SNAKE_CASE = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(_UpperCamelCase )
                self.assertTrue(tokenizer.decode([255] ) == "" )

    # The next four stubs intentionally disable mixin tests that do not apply
    # to a vocabulary-free byte tokenizer (presumably — confirm upstream).
    def __snake_case( self : str ) -> Optional[int]:
        '''Intentionally skipped for the byte-level tokenizer.'''
        pass

    def __snake_case( self : List[Any] ) -> str:
        '''Intentionally skipped for the byte-level tokenizer.'''
        pass

    def __snake_case( self : Any ) -> int:
        '''Intentionally skipped for the byte-level tokenizer.'''
        pass

    def __snake_case( self : Dict ) -> int:
        '''Intentionally skipped for the byte-level tokenizer.'''
        pass

    # convert_tokens_to_string must join string tokens back into a str.
    def __snake_case( self : Optional[Any] ) -> Optional[Any]:
        '''Joining a list of single-character tokens yields a plain string.'''
        SCREAMING_SNAKE_CASE = self.get_tokenizers(fast=_UpperCamelCase , do_lower_case=_UpperCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                SCREAMING_SNAKE_CASE = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_string(_UpperCamelCase )
                self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )

    # Special-token id setters and their string twins must stay in sync.
    def __snake_case( self : int ) -> str:
        '''Setting <token>_id attributes updates the paired token attributes.'''
        SCREAMING_SNAKE_CASE = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                SCREAMING_SNAKE_CASE = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                SCREAMING_SNAKE_CASE = 0
                SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(
                    _UpperCamelCase , skip_special_tokens=_UpperCamelCase )
                for attr in attributes_list:
                    setattr(_UpperCamelCase , attr + "_id" , _UpperCamelCase )
                    self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
                    self.assertEqual(getattr(_UpperCamelCase , attr + "_id" ) , _UpperCamelCase )
                    setattr(_UpperCamelCase , attr + "_id" , _UpperCamelCase )
                    self.assertEqual(getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
                    self.assertEqual(getattr(_UpperCamelCase , attr + "_id" ) , _UpperCamelCase )
                setattr(_UpperCamelCase , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(_UpperCamelCase , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(_UpperCamelCase , "additional_special_tokens_ids" ) , [] )
                setattr(_UpperCamelCase , "additional_special_tokens_ids" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(_UpperCamelCase , "additional_special_tokens" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_UpperCamelCase , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
| 704 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
    """Multi-GPU integration tests: each test launches a helper script with torchrun.

    Fixes: the original stored the script paths in a throwaway local instead of
    `self.*` attributes, and all four test methods shared one name (so only the
    last one existed); the launch commands also referenced undefined names.
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Launches this very file: the __main__ block below exercises
        # Accelerator.pad_across_processes under torchrun.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_pad_across_processes above.
    # Fixes: every assignment went to a throwaway name while the checks read
    # undefined `shape` / `tensor` / `error_msg` / `tensora` / `index`.
    accelerator = Accelerator()
    # Each rank builds a tensor with a different first dimension so padding is needed.
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 647 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A__ ( PipelineTool ):
    """Tool answering a question about a document image via a Donut
    VisionEncoderDecoder model.

    Fixes: the class inherited from itself (`A__`) instead of the imported
    `PipelineTool`; all class attributes and all three methods shared one name
    (shadowing each other, and PipelineTool dispatches on encode/forward/decode);
    method signatures had duplicate parameter names; `tokenajson` restored to
    `token2json` (digit-mangled identifier).
    """

    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values for the Donut model."""
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt'
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained generation conditioned on the DocVQA task prompt."""
        return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Convert generated ids into the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
def actual_power(a: int, b: int) -> int:
    """Compute a ** abs(b) by recursive squaring.

    `int(b / 2)` truncates toward zero, so negative exponents also reach the
    base case and yield a ** abs(b).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))  # compute once instead of twice
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int):
    """Return a ** b, supporting negative exponents via 1 / a**abs(b).

    Raises ZeroDivisionError when a == 0 and b < 0.
    Fixes: both functions carried the same placeholder name while the call
    sites referenced `actual_power` / `power`, which were undefined.
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 240 | 0 |
"""Update docs/source/_static/js/custom.js for a new release version."""
import argparse

JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Rewrite custom.js so `stableVersion` and `versionMapping` reference *version*.

    Fixes: the file contents/line index were bound to placeholder names while
    the loops read undefined `lines` / `index` / `version`; the rewritten
    stableVersion line was discarded instead of stored back; the function name
    did not match its call site; the module constant was repeatedly reassigned.
    """
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'  "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 13 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs/inputs and runs shape checks for Flax ViT models.

    Fixes (this block): both classes were named `__a` (the second shadowed the
    first while referencing `FlaxViTModelTester`); the tester stored nothing on
    `self`; the test class inherited an undefined `_snake_case` instead of the
    imported FlaxModelTesterMixin; all test methods shared one name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model tests plus ViT-specific shape checks."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 13 | 1 |
"""Project Euler problem 3: largest prime factor of a number."""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of *n* by trial division.

    Fixes: the function name did not match its call site (`solution` was
    undefined) and every local was bound to a placeholder while the body read
    undefined `n` / `ans` / `i`.

    Raises TypeError if n is not castable to int, ValueError if n <= 0.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        # Divide out each factor completely; the last factor kept is the largest.
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # Remaining n is itself a prime larger than any divisor found.
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 143 |
"""Algorithm for the pigeonhole sorting."""


def pigeonhole_sort(a):
    """Sort the list of integers *a* in place using pigeonhole sort.

    Fixes: min/max/size/holes were bound to a placeholder while the body read
    the undefined real names, and both functions shared one name.
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str() conversion fixed: " ".join over ints raised TypeError.
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 143 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCAmelCase(unittest.TestCase):
    """Tests for the BLIP-2 processor (Blip image processor + GPT-2 tokenizer).

    Fixes: every method was named `UpperCAmelCase` (so only the last survived
    and setUp/tearDown never ran), `self.tmpdirname` was never set, `np.uinta`
    restored to `np.uint8`, and several undefined `__a` references restored.
    """

    def setUp(self):
        # Save a small processor to a temp dir so AutoProcessor can reload it.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with one random PIL image (channels moved to last axis)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''input_ids''', '''attention_mask'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''input_ids''', '''attention_mask'''])
| 715 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build MobileViTImageProcessor kwargs.

    Fixes: the class was named `_UpperCAmelCase` although the test class below
    instantiates `MobileViTImageProcessingTester`; the __init__ signature had
    every parameter named `__a` (duplicate-argument SyntaxError); and nothing
    was ever stored on `self`, so `prepare_image_processor_dict` read
    attributes that did not exist.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class _UpperCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Common save/load tests plus PIL/numpy/torch call tests for MobileViTImageProcessor.

    Fixes: the base class was an undefined `lowerCamelCase` (restored to the
    imported ImageProcessingSavingTestMixin); `image_processing_class` and
    `image_processor_tester` were never set; all methods shared one name.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_flip_channel_order'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def test_batch_feature(self):
        # Covered by the common mixin; nothing MobileViT-specific to check here.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 78 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the LED slow and fast tokenizers.

    Fixes: the base class was an undefined `UpperCamelCase_` (restored to the
    imported TokenizerTesterMixin, which the class attributes below configure),
    and all three attributes shared one name so only the last survived.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def __A ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase = {"unk_token": "<unk>"}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def __A ( self : Any , **SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __A ( self : List[Any] , **SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __A ( self : int ) -> List[str]:
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __A ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __A ( self : int ) -> str:
"""simple docstring"""
lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE ) , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_torch
def test_prepare_batch_empty_target_text(self):
    """Without target text the batch must contain only encoder-side tensors."""
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("labels", batch)
        self.assertNotIn("decoder_attention_mask", batch)
@require_torch
def test_tokenizer_as_target_length(self):
    """Targets tokenized with max_length padding must have exactly that length."""
    tgt_text = [
        "Summary of the text.",
        "Another summary.",
    ]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
        self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
def test_prepare_batch_not_longer_than_maxlen(self):
    """A very long input must be truncated to the model's maximum length."""
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        batch = tokenizer(
            ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)  # BatchEncoding imported at module top -- confirm
        self.assertEqual(batch.input_ids.shape, (2, 5122))
@require_torch
def test_special_tokens(self):
    """Source and target encodings must be wrapped in <s> ... </s>."""
    src_text = ["A long paragraph for summarization."]
    tgt_text = [
        "Summary of the text.",
    ]
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        inputs = tokenizer(src_text, return_tensors="pt")
        targets = tokenizer(text_target=tgt_text, return_tensors="pt")
        input_ids = inputs["input_ids"]
        labels = targets["input_ids"]
        self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
        self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
        self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
        self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def test_global_attention_mask(self):
    """tokenizer.pad must extend a user-supplied global_attention_mask with -1."""
    for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
        text = ["Summary of the text.", "Another summary."]
        expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
        encoded_output = tokenizer(text, padding=False)
        encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
        outputs = tokenizer.pad(encoded_output)
        self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
def test_pretokenized_inputs(self):
    """Intentionally disabled override of the common mixin test.

    NOTE(review): the original method name was obfuscated; ``test_pretokenized_inputs``
    is the usual no-op override in BART-family tokenizer tests -- confirm.
    """
    pass
def test_embeded_special_tokens(self):
    """Slow and fast tokenizers must agree on ids/masks for text containing <mask>."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = "A, <mask> AllenNLP sentence."
            tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
            tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
            self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
            self.assertEqual(
                sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
            )
            tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
            tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
            self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
            self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
            self.assertSequenceEqual(
                tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
            )
            self.assertSequenceEqual(
                tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
            )
| 649 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config tester that also checks the MobileViTV2-specific `width_multiplier` field.

    Class name restored from its instantiation in the model test's setUp below;
    the base was obfuscated but ConfigTester is the imported tester base.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    """Builds a tiny MobileViTV2 config plus random inputs for the common model tests.

    NOTE(review): class/attribute names were machine-obfuscated in this copy; they are
    restored from the call sites and attribute reads in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Channel count of the last stage scales with the width multiplier, rounded to /8.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optional) classification and segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Forward both with and without labels; logits shape must be identical.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViTV2.

    NOTE(review): class name, base mixins, flag names and the skipped-test method
    names were obfuscated in this copy; restored from the imported mixins and the
    sibling MobileViT test layout -- confirm against the original file.
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below.

    The function was obfuscated to ``__a`` while the tests call ``prepare_img()``;
    restoring the name fixes the resulting NameError.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released MobileViTV2 checkpoints.

    NOTE(review): class/method/local names were obfuscated in this copy and are
    restored here from the usual integration-test layout -- confirm.
    """

    @cached_property
    def default_image_processor(self):
        # Name evidenced by the `self.default_image_processor` read in the first test.
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 649 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TensorFlow Funnel checkpoint to a PyTorch state dict on disk.

    The original name is evidenced by the call in the __main__ block below; the
    obfuscated version also had four parameters all named ``_a`` (a SyntaxError).

    :param tf_checkpoint_path: path to the TF checkpoint to load weights from
    :param config_file: JSON config describing the model architecture
    :param pytorch_dump_path: where to save the converted ``state_dict``
    :param base_model: if truthy, build FunnelBaseModel (no decoder) instead of FunnelModel
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper: the obfuscated copy bound the parser and the parsed args to the
    # same throwaway name while the code read `parser`/`args`; names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 297 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the teacher layers selected by *layers_to_copy* into *dest_layers* in order.

    Name restored from the call sites in create_student_by_copying_alternating_layers;
    the obfuscated copy had three parameters all named ``_a`` (a SyntaxError).
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# Restored name: pick_layers_to_copy below indexes this table as LAYERS_TO_COPY.
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
# Restored name: get_layers_to_supervise below indexes this table as LAYERS_TO_SUPERVISE.
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Return the teacher layer indices a student with *n_student* layers should copy.

    Falls back to the first *n_student* layers (with a warning) when the
    (teacher, student) pair has no hardcoded entry in LAYERS_TO_COPY.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    """Return which teacher layers supervise each student layer during distillation.

    Raises ValueError when the student is deeper than the teacher; the identity
    mapping is used when depths match, and the last teacher layer when n_student == 1.
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Build a student seq2seq model by copying alternating layers of *teacher*.

    Name restored from the ``fire.Fire`` entry point below; the obfuscated copy had
    every parameter named ``_a`` (a SyntaxError) and bound all locals to one name.

    :param teacher: model instance or checkpoint name to distil from
    :param save_path: directory the student (and tokenizer) are saved to
    :param e / d: desired number of encoder / decoder layers (None = keep teacher's)
    :param copy_first_teacher_layers: copy the first N layers instead of alternating
    :param e_layers_to_copy / d_layers_to_copy: explicit layer indices, overriding the tables
    :returns: (student model, encoder layers copied, decoder layers copied)
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # NOTE(review): the obfuscated copy bound this dict to a throwaway local; in the
    # upstream script it is stored on the student config for reproducibility -- confirm.
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # CLI entry point: python-fire exposes the factory's signature as command-line flags.
    fire.Fire(create_student_by_copying_alternating_layers)
| 297 | 1 |
'''simple docstring'''
from __future__ import annotations

# Elementary charge in coulombs. The function body below reads ELECTRON_CHARGE, but
# the constant had been renamed by obfuscation (to the function's own name), which
# both shadowed the function and raised NameError at call time.
ELECTRON_CHARGE = 1.6021e-19  # units = C


def a_(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve sigma = q * n * mu for whichever quantity was passed as 0.

    Exactly one of the three arguments must be 0; that quantity is computed from
    the other two and returned as ("name", value).

    :raises ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''')
    elif mobility < 0:
        raise ValueError('''mobility cannot be negative''')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''


def a_(number: int) -> bool:
    """Return True if *number* is automorphic, i.e. its square ends in the number itself.

    Examples: 5 -> 25, 25 -> 625 (automorphic); 7 -> 49 (not).
    Negative numbers are not automorphic; non-int input raises TypeError.
    Fixes from the obfuscated copy: the error message was built but never raised
    (the input was raised instead), and the square was bound to a throwaway name
    while the loop read `number_square`.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the numbers digit by digit from the right.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The obfuscated copy bound both values below to the same name, so the logger was
# immediately clobbered by the archive map; give each its own conventional name.
logger = logging.get_logger(__name__)

# Map of released Falcon checkpoints to their hosted config files.
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration for Falcon models.

    NOTE(review): the class name was obfuscated (originally FalconConfig -- confirm);
    it is kept as-is to avoid breaking external references. The base class and the
    ``self.`` prefixes on every attribute were also lost, which broke the properties
    below; both are restored. Parameter names/defaults follow the released config.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-head dimensionality of the attention projections.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary position embeddings are used whenever ALiBi is not.
        return not self.alibi
| 703 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __snake_case :
def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
    """Initialise the CNN with random kernels and BP weights.

    NOTE(review): the obfuscated copy had seven parameters all named ``_A`` (a
    SyntaxError) and dropped every ``self.`` prefix; names restored to match the
    pickle keys used by save_model/ReadModel -- confirm against the original.

    :param conv1_get: [kernel_size, kernel_number, step] of the convolution layer
    :param size_p1: pooling window size
    :param bp_num1: units of the flattened BP input layer
    :param bp_num2: units of the BP hidden layer
    :param bp_num3: units of the BP output layer
    :param rate_w: learning rate for weights
    :param rate_t: learning rate for thresholds
    """
    self.num_bp1 = bp_num1
    self.num_bp2 = bp_num2
    self.num_bp3 = bp_num3
    self.conv1 = conv1_get[:2]
    self.step_conv1 = conv1_get[2]
    self.size_pooling1 = size_p1
    self.rate_weight = rate_w
    self.rate_thre = rate_t
    # Random kernels in [-0.5, 0.5); one per convolution channel.
    self.w_conv1 = [
        np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
        for i in range(self.conv1[1])
    ]
    self.wkj = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp3) + 0.5)
    self.vji = np.mat(-1 * np.random.rand(self.num_bp1, self.num_bp2) + 0.5)
    # Thresholds in (-1, 1].
    self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
    self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
    self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
def save_model(self, save_path):
    """Pickle every learned parameter and hyper-parameter to *save_path*.

    Attribute names mirror the dictionary keys below (they had been collapsed
    by obfuscation); ReadModel reverses this mapping.
    """
    # save model dict with pickle
    model_dic = {
        "num_bp1": self.num_bp1,
        "num_bp2": self.num_bp2,
        "num_bp3": self.num_bp3,
        "conv1": self.conv1,
        "step_conv1": self.step_conv1,
        "size_pooling1": self.size_pooling1,
        "rate_weight": self.rate_weight,
        "rate_thre": self.rate_thre,
        "w_conv1": self.w_conv1,
        "wkj": self.wkj,
        "vji": self.vji,
        "thre_conv1": self.thre_conv1,
        "thre_bp2": self.thre_bp2,
        "thre_bp3": self.thre_bp3,
    }
    with open(save_path, "wb") as f:
        pickle.dump(model_dic, f)
    print(f"""Model saved: {save_path}""")
@classmethod
def ReadModel(cls, model_path):
    """Rebuild a CNN instance from a pickle produced by save_model.

    Uses ``cls(...)`` rather than a hard-coded class name: the obfuscated copy
    instantiated ``CNN`` while the class itself had been renamed, a NameError.
    """
    # read saved model
    with open(model_path, "rb") as f:
        model_dic = pickle.load(f)  # noqa: S301
    conv_get = model_dic.get("conv1")
    conv_get.append(model_dic.get("step_conv1"))
    size_p1 = model_dic.get("size_pooling1")
    bp1 = model_dic.get("num_bp1")
    bp2 = model_dic.get("num_bp2")
    bp3 = model_dic.get("num_bp3")
    r_w = model_dic.get("rate_weight")
    r_t = model_dic.get("rate_thre")
    # create model instance
    conv_ins = cls(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
    # modify model parameter
    conv_ins.w_conv1 = model_dic.get("w_conv1")
    conv_ins.wkj = model_dic.get("wkj")
    conv_ins.vji = model_dic.get("vji")
    conv_ins.thre_conv1 = model_dic.get("thre_conv1")
    conv_ins.thre_bp2 = model_dic.get("thre_bp2")
    conv_ins.thre_bp3 = model_dic.get("thre_bp3")
    return conv_ins
def sig(self, x):
    """Logistic sigmoid activation (name evidenced by the self.sig call in convolute)."""
    return 1 / (1 + np.exp(-1 * x))
def do_round(self, x):
    """Round a value to 3 decimal places (display helper)."""
    return round(x, 3)
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A):
    """Slide the convolution kernels over one image.

    Returns a pair (flattened window slices, list of feature-map matrices).

    NOTE(review): parameter and local names are mangled — the five `_A`
    parameters collide and every local rebinds `SCREAMING_SNAKE_CASE_`, so
    the names read below (`convs`, `data`, `w_convs`, `thre_convs`,
    `conv_step`, `data_focus`, `featuremap`, ...) are unbound as written.
    """
    # convolution process
    SCREAMING_SNAKE_CASE_ = convs[0]
    SCREAMING_SNAKE_CASE_ = convs[1]
    SCREAMING_SNAKE_CASE_ = np.shape(_A)[0]
    # get the data slice of original image data, data_focus
    SCREAMING_SNAKE_CASE_ = []
    for i_focus in range(0 , size_data - size_conv + 1 , _A):
        for j_focus in range(0 , size_data - size_conv + 1 , _A):
            # One (size_conv x size_conv) window starting at (i_focus, j_focus).
            SCREAMING_SNAKE_CASE_ = data[
                i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
            ]
            data_focus.append(_A)
    # calculate the feature map of every single kernel, and saved as list of matrix
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = int((size_data - size_conv) / conv_step + 1)
    for i_map in range(_A):
        SCREAMING_SNAKE_CASE_ = []
        for i_focus in range(len(_A)):
            # Window dotted with kernel i_map, minus that kernel's threshold,
            # then squashed through the sigmoid.
            SCREAMING_SNAKE_CASE_ = (
                np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
                - thre_convs[i_map]
            )
            featuremap.append(self.sig(_A))
        SCREAMING_SNAKE_CASE_ = np.asmatrix(_A).reshape(
            _A , _A)
        data_featuremap.append(_A)
    # expanding the data slice to One dimenssion
    SCREAMING_SNAKE_CASE_ = []
    for each_focus in data_focus:
        # NOTE(review): `Expand_Mat` / `focusa_list` / `focus_list` match no
        # definition in this class — presumably `_expand_mat`; verify upstream.
        focusa_list.extend(self.Expand_Mat(_A))
    SCREAMING_SNAKE_CASE_ = np.asarray(_A)
    return focus_list, data_featuremap
def lowerCAmelCase__(self, featuremaps, size_pooling, pooling_type="average_pool"):
    """Downsample each square feature map with non-overlapping pooling windows.

    featuremaps  -- list of square numpy matrices, all the same size
    size_pooling -- side length of the pooling window (assumed to divide the map size)
    pooling_type -- "average_pool" (default) or "max_pooling"

    Returns a list of pooled matrices, one per input map. The original
    signature declared three parameters all named `_A` (a SyntaxError); names
    are restored from the references inside the body.
    """
    # pooling process
    size_map = len(featuremaps[0])
    size_pooled = int(size_map / size_pooling)
    featuremap_pooled = []
    for i_map in range(len(featuremaps)):
        feature_map = featuremaps[i_map]
        map_pooled = []
        for i_focus in range(0, size_map, size_pooling):
            for j_focus in range(0, size_map, size_pooling):
                focus = feature_map[
                    i_focus : i_focus + size_pooling,
                    j_focus : j_focus + size_pooling,
                ]
                if pooling_type == "average_pool":
                    # average pooling
                    map_pooled.append(np.average(focus))
                elif pooling_type == "max_pooling":
                    # max pooling
                    map_pooled.append(np.max(focus))
        map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
        featuremap_pooled.append(map_pooled)
    return featuremap_pooled
def lowerCAmelCase__(self, data):
    """Flatten a list of 2-D numpy matrices into one 1-D numpy array.

    Each matrix is reshaped to a single row, converted to a plain list, and
    the pieces are concatenated in order.
    """
    # expanding three dimension data to one dimension list
    data_expanded = []
    for i in range(len(data)):
        shapes = np.shape(data[i])
        data_listed = data[i].reshape(1, shapes[0] * shapes[1])
        data_listed = data_listed.getA().tolist()[0]
        data_expanded.extend(data_listed)
    return np.asarray(data_expanded)
def lowerCAmelCase__(self, data):
    """Flatten one matrix-like object to a (1, rows*cols) numpy array."""
    # expanding matrix to one dimension list
    data_mat = np.asarray(data)
    shapes = np.shape(data_mat)
    data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
    return data_expanded
def lowerCAmelCase__(self, out_map, pd_pool, num_map, size_map, size_pooling):
    """Up-sample pooled gradients back to feature-map size and apply sigmoid'.

    out_map      -- list of `num_map` square activation maps (size_map x size_map)
    pd_pool      -- flat sequence of pooled partial derivatives, consumed in order
    size_pooling -- side length of the pooling window used in the forward pass

    Returns one gradient matrix per map. The original signature declared five
    parameters all named `_A`; names are restored from the body's references.
    """
    pd_all = []
    i_pool = 0
    for i_map in range(num_map):
        # Broadcast each pooled derivative over its originating window.
        pd_conv = np.ones((size_map, size_map))
        for i in range(0, size_map, size_pooling):
            for j in range(0, size_map, size_pooling):
                pd_conv[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                i_pool = i_pool + 1
        # Chain rule through the sigmoid: grad * out * (1 - out).
        pd_map = np.multiply(pd_conv, np.multiply(out_map[i_map], (1 - out_map[i_map])))
        pd_all.append(pd_map)
    return pd_all
def lowerCAmelCase__ ( self , _A , _A , _A , _A , _A , _A=bool):
    """Train the network by back-propagation until `n_repeat` epochs or `mse < error_accuracy`.

    NOTE(review): this block is name-mangled beyond safe repair — the six
    parameters collide on `_A` (upstream they were datas_train, datas_teach,
    n_repeat, error_accuracy, draw_e) and every local rebinds
    `SCREAMING_SNAKE_CASE_`, so `rp`, `mse`, `all_mse`, `error_count`,
    `patterns` etc. are read but never assigned as written. Kept byte-for-byte;
    restore the upstream names before running.
    """
    # model traning  [sic]
    print('----------------------Start Training-------------------------')
    print((' - - Shape: Train_Data ', np.shape(_A)))
    print((' - - Shape: Teach_Data ', np.shape(_A)))
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = 10000
    while rp < n_repeat and mse >= error_accuracy:
        SCREAMING_SNAKE_CASE_ = 0
        print(f"""-------------Learning Time {rp}--------------""")
        for p in range(len(_A)):
            # print('------------Learning Image: %d--------------'%p)
            # ---- forward pass: convolution -> pooling -> two dense layers ----
            SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_train[p])
            SCREAMING_SNAKE_CASE_ = np.asarray(datas_teach[p])
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
                _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
            SCREAMING_SNAKE_CASE_ = np.shape(_A)
            SCREAMING_SNAKE_CASE_ = self._expand(_A)
            SCREAMING_SNAKE_CASE_ = data_bp_input
            SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji.T) - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            SCREAMING_SNAKE_CASE_ = np.dot(_A , self.wkj.T) - self.thre_bpa
            SCREAMING_SNAKE_CASE_ = self.sig(_A)
            # --------------Model Leaning ------------------------
            # calculate error and gradient---------------
            SCREAMING_SNAKE_CASE_ = np.multiply(
                (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa)))
            SCREAMING_SNAKE_CASE_ = np.multiply(
                np.dot(_A , self.wkj) , np.multiply(_A , (1 - bp_outa)))
            SCREAMING_SNAKE_CASE_ = np.dot(_A , self.vji)
            SCREAMING_SNAKE_CASE_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
            SCREAMING_SNAKE_CASE_ = pd_conva_pooled.T.getA().tolist()
            SCREAMING_SNAKE_CASE_ = self._calculate_gradient_from_pool(
                _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
            # weight and threshold learning process---------
            # convolution layer
            for k_conv in range(self.conva[1]):
                SCREAMING_SNAKE_CASE_ = self._expand_mat(pd_conva_all[k_conv])
                SCREAMING_SNAKE_CASE_ = self.rate_weight * np.dot(_A , _A)
                SCREAMING_SNAKE_CASE_ = self.w_conva[k_conv] + delta_w.reshape(
                    (self.conva[0], self.conva[0]))
                SCREAMING_SNAKE_CASE_ = (
                    self.thre_conva[k_conv]
                    - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                )
            # all connected layer
            SCREAMING_SNAKE_CASE_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
            SCREAMING_SNAKE_CASE_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
            SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_k_all * self.rate_thre
            SCREAMING_SNAKE_CASE_ = self.thre_bpa - pd_j_all * self.rate_thre
            # calculate the sum error of all single image
            SCREAMING_SNAKE_CASE_ = np.sum(abs(data_teach - bp_outa))
            error_count += errors
            # print(' ----Teach ',data_teach)
            # print(' ----BP_output ',bp_out3)
        SCREAMING_SNAKE_CASE_ = rp + 1
        SCREAMING_SNAKE_CASE_ = error_count / patterns
        all_mse.append(_A)

    def draw_error():
        # Plot the per-epoch MSE against the target accuracy line.
        SCREAMING_SNAKE_CASE_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
        plt.plot(_A , '+-')
        plt.plot(_A , 'r--')
        plt.xlabel('Learning Times')
        plt.ylabel('All_mse')
        plt.grid(_A , alpha=0.5)
        plt.show()

    print('------------------Training Complished---------------------')
    print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
    if draw_e:
        draw_error()
    return mse
def lowerCAmelCase__ ( self , _A):
    """Run the forward pass over a batch of images and return rounded outputs.

    NOTE(review): locals are mangled to `SCREAMING_SNAKE_CASE_`, so
    `datas_test`, `data_bp_input`, `bp_outa`, `produce_out` are unbound as
    written; kept byte-for-byte — restore upstream names before running.
    """
    # model predict
    SCREAMING_SNAKE_CASE_ = []
    print('-------------------Start Testing-------------------------')
    print((' - - Shape: Test_Data ', np.shape(_A)))
    for p in range(len(_A)):
        # Same forward pipeline as training: convolute -> pool -> expand -> 2 dense layers.
        SCREAMING_SNAKE_CASE_ = np.asmatrix(datas_test[p])
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.convolute(
            _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        SCREAMING_SNAKE_CASE_ = self.pooling(_A , self.size_poolinga)
        SCREAMING_SNAKE_CASE_ = self._expand(_A)
        SCREAMING_SNAKE_CASE_ = data_bp_input
        SCREAMING_SNAKE_CASE_ = bp_outa * self.vji.T - self.thre_bpa
        SCREAMING_SNAKE_CASE_ = self.sig(_A)
        SCREAMING_SNAKE_CASE_ = bp_outa * self.wkj.T - self.thre_bpa
        SCREAMING_SNAKE_CASE_ = self.sig(_A)
        produce_out.extend(bp_outa.getA().tolist())
    # Round every output to 3 decimals for presentation.
    SCREAMING_SNAKE_CASE_ = [list(map(self.do_round , _A)) for each in produce_out]
    return np.asarray(_A)
def lowerCAmelCase__(self, data):
    """Run only the convolution + pooling stages on *data*.

    Returns (feature maps, pooled maps) so the intermediate representations
    can be inspected. The original returned two undefined names; they are
    bound from the pipeline's actual outputs here.
    """
    # return the data of image after convoluting process so we can check it out
    data_test = np.asmatrix(data)
    _data_focus, data_conved = self.convolute(
        data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
    )
    data_pooled = self.pooling(data_conved, self.size_poolinga)
    return data_conved, data_pooled
if __name__ == "__main__":
    # No demo is wired up; importing or executing this module has no side effects.
    pass
| 620 | 0 |
from __future__ import annotations
import queue
class snake_case_ :
    """A binary-tree node: a payload plus left/right child links."""

    def __init__(self, __magic_name__) -> None:
        # Original mangled body bound locals and never set attributes;
        # store the payload and detached children on the instance.
        self.data = __magic_name__
        self.left = None
        self.right = None
def __a() -> TreeNode:
    """Interactively build a binary tree breadth-first from console input.

    Entering "n" (or nothing) at any prompt stops and returns the root.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    answer = input("Enter the value of the root node: ").strip().lower()
    pending = queue.Queue()
    root = snake_case_(int(answer))
    pending.put(root)
    while not pending.empty():
        node_found = pending.get()
        answer = input(f"Enter the left node of {node_found.data}: ").strip().lower() or "n"
        if answer == "n":
            return root
        left_node = snake_case_(int(answer))
        node_found.left = left_node
        pending.put(left_node)
        answer = input(f"Enter the right node of {node_found.data}: ").strip().lower() or "n"
        if answer == "n":
            return root
        right_node = snake_case_(int(answer))
        node_found.right = right_node
        pending.put(right_node)
    raise
def __a(node: TreeNode) -> None:
    """Print the pre-order (root, left, right) traversal, comma separated."""
    # Silently ignore anything that is not a node, as the original did.
    if not isinstance(node, snake_case_) or not node:
        return

    def visit(cur):
        # Local recursion so module-level name shadowing cannot break it.
        if not cur:
            return
        print(cur.data, end=",")
        visit(cur.left)
        visit(cur.right)

    visit(node)
def __a(node: TreeNode) -> None:
    """Print the in-order (left, root, right) traversal, comma separated."""
    if not isinstance(node, snake_case_) or not node:
        return

    def visit(cur):
        # Local recursion so module-level name shadowing cannot break it.
        if not cur:
            return
        visit(cur.left)
        print(cur.data, end=",")
        visit(cur.right)

    visit(node)
def __a(node: TreeNode) -> None:
    """Print the post-order (left, right, root) traversal, comma separated."""
    if not isinstance(node, snake_case_) or not node:
        return

    def visit(cur):
        # Local recursion so module-level name shadowing cannot break it.
        if not cur:
            return
        visit(cur.left)
        visit(cur.right)
        print(cur.data, end=",")

    visit(node)
def __a(node: TreeNode) -> None:
    """Breadth-first traversal printing each datum followed by a comma."""
    if not isinstance(node, snake_case_) or not node:
        return
    pending = queue.Queue()
    pending.put(node)
    while not pending.empty():
        current = pending.get()
        print(current.data, end=",")
        if current.left:
            pending.put(current.left)
        if current.right:
            pending.put(current.right)
def __a(node: TreeNode) -> None:
    """Level-by-level traversal: each tree depth is printed on its own line."""
    if not isinstance(node, snake_case_) or not node:
        return
    pending = queue.Queue()
    pending.put(node)
    while not pending.empty():
        next_level = []
        # Drain the current level, collecting children for the next one.
        while not pending.empty():
            current = pending.get()
            print(current.data, end=",")
            if current.left:
                next_level.append(current.left)
            if current.right:
                next_level.append(current.right)
        print()
        for child in next_level:
            pending.put(child)
def __a(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, snake_case_) or not node:
        return
    stack = []
    current = node
    while current or stack:
        while current:  # walk left, printing on the way down
            print(current.data, end=",")
            stack.append(current)
            current = current.left
        # current is now None: resume from the last saved node's right subtree
        current = stack.pop()
        current = current.right
def __a(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, snake_case_) or not node:
        return
    stack = []
    current = node
    while current or stack:
        while current:  # descend to the leftmost unvisited node
            stack.append(current)
            current = current.left
        current = stack.pop()
        print(current.data, end=",")  # visit between left and right subtrees
        current = current.right
def __a(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, snake_case_) or not node:
        return
    stack_in, stack_out = [], []
    stack_in.append(node)
    # First pass produces reversed post-order in stack_out.
    while stack_in:
        current = stack_in.pop()
        if current.left:
            stack_in.append(current.left)
        if current.right:
            stack_in.append(current.right)
        stack_out.append(current)
    # Popping stack_out yields the post-order sequence.
    while stack_out:
        print(stack_out.pop().data, end=",")
def __a ( __UpperCAmelCase : str = "" , __UpperCAmelCase : Dict=50 , __UpperCAmelCase : Dict="*" ) -> Any:
"""simple docstring"""
if not s:
return "\n" + width * char
lowerCamelCase_ , lowerCamelCase_ : int = divmod(width - len(UpperCamelCase__ ) - 2 , 2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    # NOTE(review): the helpers above were all mangled to `__a`, and the node
    # class to `snake_case_` (re-bound to the tree instance below), so the
    # names called here (prompt, build_tree, pre_order, ..., `node`) are
    # undefined in this file as-is — restore the upstream names before running.
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    snake_case_ : TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 488 | '''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCAmelCase ( UpperCamelCase__ : dict ):
    """Split a dataset mapping into its (features, targets) pair.

    The original body read an undefined name ``data``; the parameter itself
    is the dataset, so it is indexed directly.
    """
    return (UpperCamelCase__["data"], UpperCamelCase__["target"])
def lowerCAmelCase(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor on (features, target) and predict test_features.

    Returns the predictions as a column vector of shape (len(test_features), 1).
    The original declared three identical parameter names (a SyntaxError) and
    read an undefined ``predictions``; names are restored from the body.
    """
    xgb = XGBRegressor(verbosity=0, random_state=4_2)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def lowerCAmelCase ( ):
    """Fetch the California-housing data, fit an XGBoost regressor, print MAE/MSE.

    NOTE(review): every result is bound to the same mangled name
    `__UpperCAmelCase`, and the names called/read (`data_handling`,
    `xgboost`, the `UpperCamelCase__` arguments, the `main` in the guard)
    are undefined in this file as-is — restore the upstream names first.
    """
    __UpperCAmelCase = fetch_california_housing()
    __UpperCAmelCase , __UpperCAmelCase = data_handling(UpperCamelCase__ )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(
        UpperCamelCase__ , UpperCamelCase__ , test_size=0.25 , random_state=1 )
    __UpperCAmelCase = xgboost(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(UpperCamelCase__ , UpperCamelCase__ )}""" )
    print(f"""Mean Square Error : {mean_squared_error(UpperCamelCase__ , UpperCamelCase__ )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 262 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def snake_case ( A__ = 1_50_00_00 ):
    """Project Euler 75: count perimeters <= *A__* formed by exactly one right triangle.

    Primitive Pythagorean triples are generated with Euclid's formula
    (m > n >= 1, coprime, opposite parity), giving primitive perimeter
    2*m*(m + n); every multiple of each primitive perimeter is tallied,
    and perimeters seen exactly once are counted. The original body read
    undefined names (`a_`, `limit`); they are bound from the parameter here.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= A__:
        # n runs over values of opposite parity to m, below m.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, A__ + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    # The counting function above is named `snake_case` in this file (mangled
    # from `solution`); call it directly so the demo runs.
    print(f"{snake_case() = }")
| 705 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
    """Integration check for the TF CamemBERT base checkpoint."""

    @slow
    def _SCREAMING_SNAKE_CASE(self) -> None:
        """Load the checkpoint and compare a slice of hidden states to known values."""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        # Token ids for: "J'aime le camembert !"
        # Original used `tf.intaa` / `tf.floataa`, which are undefined dtypes.
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]], dtype=tf.int32,
        )
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 463 | 0 |
"""simple docstring"""
def snake_case(a, b):
    """Return the bitwise XOR of two non-negative ints as a '0b'-prefixed binary string.

    Raises ValueError if either argument is negative. The original declared
    both parameters as `A__` (a SyntaxError); distinct names restore it.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 95 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path of the SentencePiece fixture model shared by every tokenizer test below.
__magic_name__ : str = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = DebertaVaTokenizer
snake_case__ = DebertaVaTokenizerFast
snake_case__ = True
snake_case__ = True
def _SCREAMING_SNAKE_CASE(self):
    """Build a slow tokenizer from the fixture model and persist it for the suite."""
    super().setUp()
    # We have a SentencePiece fixture for testing; its path is the module-level
    # constant `__magic_name__` (the original passed the method's own name here).
    tokenizer = DebertaVaTokenizer(__magic_name__, unk_token='<unk>')
    tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = 'this is a test'
UpperCamelCase = 'this is a test'
return input_text, output_text
def _SCREAMING_SNAKE_CASE(self):
    """`<pad>` must map to id 0 and back (unbound locals restored)."""
    token = '<pad>'
    token_id = 0
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def _SCREAMING_SNAKE_CASE(self):
    """Check the first/last vocabulary entries and total key count."""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0], '<pad>')
    self.assertEqual(vocab_keys[1], '<unk>')
    self.assertEqual(vocab_keys[-1], '[PAD]')
    # 30,001 = 30,000 sentencepiece entries plus the trailing '[PAD]' key —
    # presumably an added token; verify against vocab_size (30,000) below.
    self.assertEqual(len(vocab_keys), 3_0001)
def _SCREAMING_SNAKE_CASE(self):
    """The fixture vocabulary should contain exactly 30,000 entries."""
    expected_size = 3_0000
    self.assertEqual(self.get_tokenizer().vocab_size, expected_size)
def _SCREAMING_SNAKE_CASE(self):
    """Both tokenizers should lower-case the input when do_lower_case is on."""
    # fmt: off
    sequence = ' \tHeLLo!how \n Are yoU? '
    tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']
    # fmt: on
    # The all-lowercase expected pieces ground do_lower_case=True
    # (the flag value was mangled in the original).
    tokenizer = DebertaVaTokenizer(__magic_name__, do_lower_case=True)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, do_lower_case=True)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _SCREAMING_SNAKE_CASE ( self : int ):
    """Intentionally skipped: slow/fast tokenizers disagree (known fast-tokenizer bug)."""
    pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _SCREAMING_SNAKE_CASE ( self : Any ):
    """Intentionally skipped: slow/fast tokenizers disagree (known fast-tokenizer bug)."""
    pass
def _SCREAMING_SNAKE_CASE(self):
    """Punctuation should be split into standalone pieces when split_by_punct is on."""
    # fmt: off
    sequence = 'I was born in 92000, and this is falsé.'
    tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
    # fmt: on
    # The detached '▁'/',' pieces (and the un-lowered 'I' -> '<unk>') ground
    # split_by_punct=True with no lower-casing (flag value was mangled).
    tokenizer = DebertaVaTokenizer(__magic_name__, split_by_punct=True)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, split_by_punct=True)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """Lower-casing combined with punctuation splitting."""
    # fmt: off
    sequence = 'I was born in 92000, and this is falsé.'
    tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
    # fmt: on
    # '▁i' (lowered) plus the detached '▁'/',' pieces ground
    # do_lower_case=True, split_by_punct=True (flag values were mangled).
    tokenizer = DebertaVaTokenizer(__magic_name__, do_lower_case=True, split_by_punct=True)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, do_lower_case=True, split_by_punct=True)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """Lower-casing with punctuation splitting disabled."""
    # fmt: off
    sequence = 'I was born in 92000, and this is falsé.'
    tokens_target = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
    # fmt: on
    # Lowered '▁i' with ',' NOT preceded by a detached '▁' grounds
    # do_lower_case=True, split_by_punct=False (flag values were mangled).
    tokenizer = DebertaVaTokenizer(__magic_name__, do_lower_case=True, split_by_punct=False)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, do_lower_case=True, split_by_punct=False)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """Punctuation splitting with lower-casing explicitly disabled."""
    # fmt: off
    sequence = 'I was born in 92000, and this is falsé.'
    tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
    # fmt: on
    # Un-lowered 'I' -> '<unk>' plus detached '▁'/',' pieces ground
    # do_lower_case=False, split_by_punct=True (flag values were mangled).
    tokenizer = DebertaVaTokenizer(__magic_name__, do_lower_case=False, split_by_punct=True)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, do_lower_case=False, split_by_punct=True)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """Neither lower-casing nor punctuation splitting enabled."""
    # fmt: off
    sequence = ' \tHeLLo!how \n Are yoU? '
    tokens_target = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
    # fmt: on
    # Un-lowered capitals mapping to '<unk>' ground do_lower_case=False;
    # split_by_punct=False inferred from the paired upstream test — verify.
    tokenizer = DebertaVaTokenizer(__magic_name__, do_lower_case=False, split_by_punct=False)
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, tokens_target)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, do_lower_case=False, split_by_punct=False)
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(rust_tokens, tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """Slow and fast tokenizers must agree on tokens and ids for one sequence."""
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()
    sequence = 'I was born in 92000, and this is falsé.'
    # add_special_tokens=False inferred from the later encode without it — verify.
    tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
    rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
    self.assertListEqual(tokens, rust_tokens)
    ids = tokenizer.encode(sequence, add_special_tokens=False)
    rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(ids, rust_ids)
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence)
    rust_ids = rust_tokenizer.encode(sequence)
    self.assertListEqual(ids, rust_ids)
def _SCREAMING_SNAKE_CASE(self):
    """Encode/tokenize/id-round-trip checks on two sequences, slow and fast."""
    sequence = 'This is a test'
    ids_target = [13, 1, 4398, 25, 21, 1289]
    tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
    back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
    # keep_accents=True inferred: 'é' survives as its own piece below — verify.
    tokenizer = DebertaVaTokenizer(__magic_name__, keep_accents=True)
    rust_tokenizer = DebertaVaTokenizerFast(__magic_name__, keep_accents=True)
    ids = tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(ids, ids_target)
    tokens = tokenizer.tokenize(sequence)
    self.assertListEqual(tokens, tokens_target)
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(back_tokens, back_tokens_target)
    rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(rust_ids, ids_target)
    rust_tokens = rust_tokenizer.tokenize(sequence)
    self.assertListEqual(rust_tokens, tokens_target)
    rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
    self.assertListEqual(rust_back_tokens, back_tokens_target)
    # fmt: off
    sequence = 'I was born in 92000, and this is falsé.'
    ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
    tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
    back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
    # fmt: on
    ids = tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(ids, ids_target)
    tokens = tokenizer.tokenize(sequence)
    self.assertListEqual(tokens, tokens_target)
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(back_tokens, back_tokens_target)
    rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(rust_ids, ids_target)
    rust_tokens = rust_tokenizer.tokenize(sequence)
    self.assertListEqual(rust_tokens, tokens_target)
    rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
    self.assertListEqual(rust_back_tokens, back_tokens_target)
def _SCREAMING_SNAKE_CASE(self):
    """build_inputs_with_special_tokens must wrap one or two sequences with CLS/SEP."""
    tokenizer = DebertaVaTokenizer(__magic_name__)
    text = tokenizer.encode('sequence builders')
    text_a = tokenizer.encode('multi-sequence build')
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
    self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
    self.assertEqual(
        [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
        encoded_pair,
    )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
    """Slow integration test: tokenize reference sentences with the released
    ``microsoft/deberta-v2-xlarge`` tokenizer and compare against a frozen encoding.

    Bug fix: the expected-encoding dict was assigned to the throwaway name
    ``UpperCamelCase`` and never passed to the test util (which received
    ``_SCREAMING_SNAKE_CASE`` instead).
    """
    # fmt: off
    expected_encoding = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 280 | 0 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map each sub-module to the public names it defines, used by the lazy loader.
# Bug fix: this dict was bound to a throwaway name while `_LazyModule` below
# received the undefined `_import_structure`.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

# Only register the modeling classes when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: this list was previously assigned to a throwaway name instead of
    # being registered in the import structure.
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    # Static type checkers see the eager imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Bug fix: the lazy module must replace this module in sys.modules so heavy
    # imports are deferred; previously the result was discarded.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
    """argparse callback: build the download command from the parsed namespace.

    Bug fix: the body read the undefined name ``args`` (the parameter is
    ``lowerCAmelCase_``) and referenced ``DownloadCommand``, which does not exist
    in this module — the command class defined below is named ``lowerCAmelCase``.
    """
    return lowerCAmelCase(
        lowerCAmelCase_.model , lowerCAmelCase_.cache_dir , lowerCAmelCase_.force , lowerCAmelCase_.trust_remote_code )
class lowerCAmelCase ( __a ):
    '''CLI command that pre-downloads a model's weights and tokenizer into the cache.

    NOTE(review): the base class is written as `__a`, which is not defined at
    module scope here, and two methods below share the name `lowerCAmelCase`, so
    the second definition shadows the first (presumably `register_subcommand`
    and `run`) — confirm against the upstream transformers CLI module.
    '''

    @staticmethod
    def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
        """Register the `download` sub-command and its CLI options on the parser.

        NOTE(review): the body reads `parser` and `download_parser`, but the
        parameter is `__a` and the sub-parser is bound to the throwaway local
        `__lowercase` — these names do not resolve as written.
        """
        __lowercase : Union[str, Any] = parser.add_parser("""download""" )
        download_parser.add_argument(
            """--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
        download_parser.add_argument(
            """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
        download_parser.add_argument(
            """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
        download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
        download_parser.set_defaults(func=__a )

    def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
        """Store the target model id, cache directory, force and trust flags.

        NOTE(review): the repeated parameter name `__a` is a SyntaxError, and the
        assignments bind plain locals instead of the `self._model` / `self._cache`
        / `self._force` / `self._trust_remote_code` attributes read by the method
        below — the intended attribute names should be restored.
        """
        __lowercase : Dict = model
        __lowercase : List[Any] = cache
        __lowercase : Any = force
        __lowercase : Optional[int] = trust_remote_code

    def lowerCAmelCase ( self : str ) -> List[str]:
        """Download both the model and its tokenizer into the local cache.

        The import is local so the heavy auto classes are only loaded when the
        command actually runs.
        """
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
def UpperCamelCase_( a , n , mod ) -> int:
    """Compute ``(a ** n) % mod`` by binary exponentiation in O(log n).

    Bug fixes: the original declared three parameters with the same name (a
    SyntaxError), recursed through the undefined name ``binary_exponentiation``,
    annotated the return as the undefined ``List[str]``, and halved the exponent
    with float division — ``n // 2`` keeps the exponent an int.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel off one factor of a.
        return (UpperCamelCase_(a , n - 1 , mod ) * a) % mod
    else:
        # Even exponent: square the half-power.
        b = UpperCamelCase_(a , n // 2 , mod )
        return (b * b) % mod
return (b * b) % mod
# a prime number
p = 701

a = 1000000000
b = 10

# Demonstrate Fermat's little theorem: for prime p (not dividing b), b**(p-2) is
# the modular inverse of b, so a/b == a * b**(p-2) (mod p).
# Bug fix: the three values were all bound to one throwaway name while the prints
# read `a`, `b`, `p`, and the call targeted the undefined `binary_exponentiation`
# (this module's function is named `UpperCamelCase_`).
# NOTE: `(a / b) % p` uses float division; the comparison only holds because a/b
# is exactly representable here.

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * UpperCamelCase_(b, p - 2, p)) % p)

# using naive exponentiation, O(p):
print((a / b) % p == (a * b ** (p - 2)) % p)
| 89 |
'''Project Euler problem 1: https://projecteuler.net/problem=1'''


def __lowerCamelCase ( UpperCAmelCase_ = 10_00 ) ->int:
    """Return the sum of all multiples of 3 or 5 strictly below ``UpperCAmelCase_``."""
    # 0, 1 and 2 contribute nothing, so the scan starts at 3.
    return sum(e for e in range(3 , UpperCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 )


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name `solution`.
    print(f"""{__lowerCamelCase() = }""")
| 368 | 0 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def UpperCamelCase_ ( lowerCamelCase : bytes ) -> bytes:
    """Reverse the byte order of a 32-char bit string (four 8-char groups).

    Bug fix: the accumulator was bound to a throwaway name and the loop read the
    undefined ``string_aa`` instead of the parameter.

    Raises:
        ValueError: if the input is not exactly 32 chars long.
    """
    if len(lowerCamelCase ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += lowerCamelCase[8 * i : 8 * i + 8]
    return little_endian
def UpperCamelCase_ ( lowerCamelCase : int ) -> bytes:
    """Format a non-negative int as 8 lowercase hex chars with reversed byte order.

    Bug fix: the validity check and format call read the undefined ``i`` /
    ``hex_rep`` locals instead of the parameter and the bound names.

    Raises:
        ValueError: if the input is negative.
    """
    if lowerCamelCase < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(lowerCamelCase , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def UpperCamelCase_ ( lowerCamelCase : bytes ) -> bytes:
    """Pad a message into an MD5 bit string: per-byte binary expansion, a ``1``
    bit, zero padding to 448 mod 512, then the 64-bit little-endian bit length.

    Bug fixes: locals were bound to a throwaway name while later lines read
    ``bit_string`` / ``start_len``; the loop iterated the undefined ``message``;
    the length field measured bytes, not bits; and the byte-order helper was
    called under a name that does not exist in this module, so it is nested here.
    """
    def _to_little_endian(string_32: bytes) -> bytes:
        # Reverse the four 8-char groups of a 32-char bit string.
        swapped = b""
        for i in [3, 2, 1, 0]:
            swapped += string_32[8 * i : 8 * i + 8]
        return swapped

    bit_string = b""
    for char in lowerCamelCase:
        bit_string += format(char , '08b' ).encode('utf-8' )
    # Original message length in bits (one bit-string char per bit).
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars.
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += _to_little_endian(start_len[32:] ) + _to_little_endian(start_len[:32] )
    return bit_string
def UpperCamelCase_ ( lowerCamelCase : bytes ) -> Generator[list[int], None, None]:
    """Yield each 512-char chunk of the bit string as 16 little-endian 32-bit ints.

    Bug fixes: locals were bound to a throwaway name while later lines read
    ``block`` / ``block_words``, and the byte-order swap targeted a module name
    that does not exist here, so it is inlined.

    Raises:
        ValueError: if the input length is not a multiple of 512.
    """
    if len(lowerCamelCase ) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(lowerCamelCase ) , 512 ):
        block = lowerCamelCase[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            word = block[i : i + 32]
            # Reverse the four 8-char groups before parsing the word as base 2.
            swapped = b""
            for j in [3, 2, 1, 0]:
                swapped += word[8 * j : 8 * j + 8]
            block_words.append(int(swapped , 2 ) )
        yield block_words
def UpperCamelCase_ ( lowerCamelCase : int ) -> int:
    """Return the bitwise complement of a non-negative int within 32 bits.

    Bug fix: the check and format call read the undefined ``i`` and the loop
    accumulated into an unbound local; names now match the bindings.

    Raises:
        ValueError: if the input is negative.
    """
    if lowerCamelCase < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(lowerCamelCase , '032b' )
    new_str = ''
    for c in i_str:
        # Flip each bit of the 32-bit binary representation.
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def UpperCamelCase_ ( a : int , b : int ) -> int:
    """Add two ints modulo 2**32.

    Bug fix: both parameters shared one name (a SyntaxError) while the body read
    ``a`` and ``b``.
    """
    return (a + b) % 2**32
def UpperCamelCase_ ( i : int , shift : int ) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Bug fix: both parameters shared one name (a SyntaxError) while the body read
    ``i`` and ``shift``.

    Raises:
        ValueError: if either argument is negative.
    """
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCamelCase_ ( lowerCamelCase : bytes ) -> bytes:
    """Return the MD5 digest of ``lowerCamelCase`` as a 32-char lowercase hex
    bytestring (bit-string reference implementation, RFC 1321).

    Bug fix: the original body called helpers (``preprocess``, ``get_block_words``,
    ``not_aa``, ``sum_aa``, ``left_rotate_aa``, ``reformat_hex``) that do not exist
    under those names in this module — every top-level function here is named
    ``UpperCamelCase_``, so each definition shadows the previous one. The helpers
    are therefore nested, making this function self-contained.

    >>> UpperCamelCase_(b'')
    b'd41d8cd98f00b204e9800998ecf8427e'
    """

    def _to_little_endian(string_32: bytes) -> bytes:
        # Reverse the four 8-char groups of a 32-char bit string (byte-order swap).
        swapped = b""
        for i in [3, 2, 1, 0]:
            swapped += string_32[8 * i : 8 * i + 8]
        return swapped

    def _reformat_hex(value: int) -> bytes:
        # Render a 32-bit int as 8 hex chars with the byte order reversed.
        hex_rep = format(value, "08x")[-8:]
        little_endian_hex = b""
        for i in [3, 2, 1, 0]:
            little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
        return little_endian_hex

    def _preprocess(message: bytes) -> bytes:
        # Expand to a bit string, append a 1 bit, pad to 448 mod 512, then append
        # the 64-bit little-endian original bit length.
        bit_string = b""
        for char in message:
            bit_string += format(char, "08b").encode("utf-8")
        start_len = format(len(bit_string), "064b").encode("utf-8")
        bit_string += b"1"
        while len(bit_string) % 512 != 448:
            bit_string += b"0"
        bit_string += _to_little_endian(start_len[32:]) + _to_little_endian(start_len[:32])
        return bit_string

    def _not_32(i: int) -> int:
        # Bitwise complement within 32 bits.
        return i ^ 0xFFFFFFFF

    def _sum_32(x: int, y: int) -> int:
        # Addition modulo 2**32.
        return (x + y) % 2**32

    def _left_rotate_32(i: int, shift: int) -> int:
        # Rotate a 32-bit value left by `shift`.
        return ((i << shift) ^ (i >> (32 - shift))) % 2**32

    bit_string = _preprocess(lowerCamelCase)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting register states (A, B, C, D).
    aa = 0x67_452_301
    ba = 0xEF_CDA_B89
    ca = 0x98_BAD_CFE
    da = 0x10_325_476

    # Per-round left-rotation amounts, one group of 16 per round family.
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process the padded bit string in 512-char chunks of 16 32-bit words.
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = [int(_to_little_endian(block[i : i + 32]), 2) for i in range(0, 512, 32)]

        a, b, c, d = aa, ba, ca, da

        # Hash current chunk: 64 rounds in four families of 16.
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | _not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a, d, c = d, c, b  # rotate the registers; b is updated below
            b = _sum_32(b, _left_rotate_32(f, shift_amounts[i]))

        # Add the hashed chunk into the running totals.
        aa = _sum_32(aa, a)
        ba = _sum_32(ba, b)
        ca = _sum_32(ca, c)
        da = _sum_32(da, d)

    return _reformat_hex(aa) + _reformat_hex(ba) + _reformat_hex(ca) + _reformat_hex(da)
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 147 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
# Bug fix: these five module constants were all bound to the single name `A`
# (each assignment overwriting the previous one), while the code below reads
# `logger` and the four `_PANDAS_*` parameter groups. Restore the names the
# rest of the module actually uses.
logger = datasets.utils.logging.get_logger(__name__)

# Parameters that have no default in pandas.read_csv (must not be passed unset),
# deprecated parameters, and parameters gated on newer pandas versions.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["""names""", """prefix"""]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["""encoding_errors""", """on_bad_lines"""]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["""date_format"""]

# Backward-compatible alias: `A` previously ended up bound to the last list.
A = _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS
@dataclass
class _UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the CSV loader, mirroring `pandas.read_csv` keyword arguments.

    NOTE(review): every field below is bound to the single name `snake_case_`, so
    each assignment overwrites the previous one and only the final field survives.
    Judging by the kwargs dict assembled in the property below, the fields were
    presumably (in order): sep, delimiter, header, names, column_names, index_col,
    usecols, prefix, mangle_dupe_cols, engine, converters, true_values,
    false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na,
    na_filter, verbose, skip_blank_lines, thousands, decimal, lineterminator,
    quotechar, quoting, escapechar, comment, encoding, dialect, error_bad_lines,
    warn_bad_lines, skipfooter, doublequote, memory_map, float_precision,
    chunksize, features, encoding_errors, on_bad_lines, date_format — confirm
    against the upstream `datasets` CSV module.
    """

    snake_case_ = ","
    snake_case_ = None
    snake_case_ = "infer"
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = True
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = False
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = True
    snake_case_ = True
    snake_case_ = False
    snake_case_ = True
    snake_case_ = None
    snake_case_ = "."
    snake_case_ = None
    snake_case_ = '"'
    snake_case_ = 0
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = None
    snake_case_ = True
    snake_case_ = True
    snake_case_ = 0
    snake_case_ = True
    snake_case_ = False
    snake_case_ = None
    snake_case_ = 1_0_0_0_0
    snake_case_ = None
    snake_case_ = "strict"
    snake_case_ = "error"
    snake_case_ = None

    def _UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        '''Post-init normalization: `delimiter` overrides `sep` and `column_names`
        overrides `names` (presumably meant to be `__post_init__`).

        NOTE(review): the results are bound to locals instead of back onto
        `self.sep` / `self.names`, so this method has no effect as written.
        '''
        if self.delimiter is not None:
            __magic_name__ : Optional[Any] = self.delimiter
        if self.column_names is not None:
            __magic_name__ : Union[str, Any] = self.column_names

    @property
    def _UpperCAmelCase ( self : str ) -> Dict:
        '''Assemble the kwargs dict passed to `pandas.read_csv`, dropping entries
        that are deprecated, have no default, or are unsupported by the installed
        pandas version.

        NOTE(review): this property shadows the method above (same name); the
        dict is bound to a local while the loops below read `pd_read_csv_kwargs`,
        and `CsvConfig` / `snake_case` are not defined in this module — the body
        does not run as written.
        '''
        __magic_name__ : List[str] = {
            '''sep''': self.sep,
            '''header''': self.header,
            '''names''': self.names,
            '''index_col''': self.index_col,
            '''usecols''': self.usecols,
            '''prefix''': self.prefix,
            '''mangle_dupe_cols''': self.mangle_dupe_cols,
            '''engine''': self.engine,
            '''converters''': self.converters,
            '''true_values''': self.true_values,
            '''false_values''': self.false_values,
            '''skipinitialspace''': self.skipinitialspace,
            '''skiprows''': self.skiprows,
            '''nrows''': self.nrows,
            '''na_values''': self.na_values,
            '''keep_default_na''': self.keep_default_na,
            '''na_filter''': self.na_filter,
            '''verbose''': self.verbose,
            '''skip_blank_lines''': self.skip_blank_lines,
            '''thousands''': self.thousands,
            '''decimal''': self.decimal,
            '''lineterminator''': self.lineterminator,
            '''quotechar''': self.quotechar,
            '''quoting''': self.quoting,
            '''escapechar''': self.escapechar,
            '''comment''': self.comment,
            '''encoding''': self.encoding,
            '''dialect''': self.dialect,
            '''error_bad_lines''': self.error_bad_lines,
            '''warn_bad_lines''': self.warn_bad_lines,
            '''skipfooter''': self.skipfooter,
            '''doublequote''': self.doublequote,
            '''memory_map''': self.memory_map,
            '''float_precision''': self.float_precision,
            '''chunksize''': self.chunksize,
            '''encoding_errors''': self.encoding_errors,
            '''on_bad_lines''': self.on_bad_lines,
            '''date_format''': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , snake_case ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class _UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads CSV files with pandas.

    NOTE(review): the four methods below were all renamed to `_UpperCAmelCase`,
    so each definition shadows the previous one — presumably `_info`,
    `_split_generators`, `_cast_table` and `_generate_tables`; confirm upstream.
    """

    # NOTE(review): `CsvConfig` is not defined in this module — the config class
    # above was renamed to `_UpperCamelCase`. Presumably `BUILDER_CONFIG_CLASS`.
    snake_case_ = CsvConfig

    def _UpperCAmelCase ( self : int ) -> Any:
        '''Expose the configured features as the dataset info.'''
        return datasets.DatasetInfo(features=self.config.features )

    def _UpperCAmelCase ( self : Union[str, Any] , snake_case : List[str] ) -> Union[str, Any]:
        '''Download/extract the configured data files and build one SplitGenerator
        per split (a bare list/str/tuple becomes a single TRAIN split).

        NOTE(review): intermediates are bound to `__magic_name__` while later
        lines read `data_files` / `files` / `splits`, and several `isinstance`
        checks pass the `snake_case` parameter where a file/str was presumably
        intended — the body does not run as written.
        '''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        __magic_name__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(snake_case , (str, list, tuple) ):
            __magic_name__ : List[str] = data_files
            if isinstance(snake_case , snake_case ):
                __magic_name__ : Optional[int] = [files]
            __magic_name__ : List[Any] = [dl_manager.iter_files(snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __magic_name__ : Union[str, Any] = []
        for split_name, files in data_files.items():
            if isinstance(snake_case , snake_case ):
                __magic_name__ : List[Any] = [files]
            __magic_name__ : Optional[int] = [dl_manager.iter_files(snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={'''files''': files} ) )
        return splits

    def _UpperCAmelCase ( self : List[Any] , snake_case : pa.Table ) -> pa.Table:
        '''Cast a pyarrow table to the configured feature schema, using the cheap
        column-wise cast when no feature requires storage casting.'''
        if self.config.features is not None:
            __magic_name__ : int = self.config.features.arrow_schema
            if all(not require_storage_cast(snake_case ) for feature in self.config.features.values() ):
                # cheaper cast
                __magic_name__ : List[str] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=snake_case )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                __magic_name__ : Optional[int] = table_cast(snake_case , snake_case )
        return pa_table

    def _UpperCAmelCase ( self : Optional[int] , snake_case : Any ) -> Any:
        '''Stream each CSV file through pandas in chunks and yield cast pyarrow
        tables keyed by (file index, batch index).

        NOTE(review): `pd.read_csv` receives the whole `snake_case` iterable where
        the per-iteration `file` was presumably intended, and intermediates are
        again bound to `__magic_name__` while `schema` / `dtype` / `csv_file_reader`
        are read later — confirm against the upstream datasets CSV module.
        '''
        __magic_name__ : str = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        __magic_name__ : List[Any] = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(snake_case ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
            __magic_name__ : int = pd.read_csv(snake_case , iterator=snake_case , dtype=snake_case , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(snake_case ):
                    __magic_name__ : str = pa.Table.from_pandas(snake_case )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(snake_case )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(snake_case )}: {e}""" )
                raise
| 147 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Force deterministic torch/CUDA kernels so pipeline outputs are reproducible.
enable_full_determinism()
class A ( _a ,_a ,_a ,unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with one ControlNet.

    NOTE(review): the mixin bases are written as `_a`, a name not defined at
    module level here (presumably the Pipeline*TesterMixin classes imported
    above), and the five class attributes below are all bound to the single name
    `lowercase_`, so each assignment overwrites the previous one (presumably
    pipeline_class / params / batch_params / image_params / image_latents_params).
    """

    lowercase_ = StableDiffusionControlNetImgaImgPipeline
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Build a tiny UNet/ControlNet/scheduler/VAE/CLIP stack for fast tests.

        NOTE(review): every component is assigned to the throwaway name `_a`
        while the returned dict reads `unet`, `controlnet`, `scheduler`, `vae`,
        `text_encoder`, `tokenizer` — those names are unbound as written, and
        `return components` reads a name never assigned.
        """
        torch.manual_seed(0 )
        _a = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        _a = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        _a = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
        torch.manual_seed(0 )
        _a = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        _a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        _a = CLIPTextModel(lowerCAmelCase_ )
        _a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _a = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=0 ) -> List[Any]:
        """Build deterministic dummy inputs (prompt, seeded generator, init image
        and control image) for the pipeline.

        NOTE(review): duplicate parameter names are a SyntaxError, and the same
        `_a` binding problem as above leaves `control_image` / `image` /
        `generator` / `inputs` unbound where they are read.
        """
        if str(lowerCAmelCase_ ).startswith('''mps''' ):
            _a = torch.manual_seed(lowerCAmelCase_ )
        else:
            _a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
        _a = 2
        _a = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , )
        _a = floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
        _a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
        _a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        """Attention slicing should change outputs by at most 2e-3."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def __lowerCAmelCase ( self : Dict ) -> str:
        """xFormers attention should change outputs by at most 2e-3 (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        """Batched single-image inference should match unbatched within 2e-3."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class A ( _a ,_a ,unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with a
    MultiControlNetModel (two ControlNets).

    NOTE(review): same obfuscation damage as the class above — `_a` bases are
    undefined, the class attributes all rebind `lowercase_`, locals are bound to
    `_a` while later lines read the original names, and several signatures
    repeat a parameter name (a SyntaxError).
    """

    lowercase_ = StableDiffusionControlNetImgaImgPipeline
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowercase_ = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
        """Build a tiny two-ControlNet stack; both ControlNets get their down
        blocks re-initialized with normal weights and bias 1.0."""
        torch.manual_seed(0 )
        _a = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(lowerCAmelCase_ : List[str] ):
            # NOTE(review): reads `m` though the parameter is `lowerCAmelCase_`,
            # and `torch.nn.Convad` looks like a mangled `torch.nn.Conv2d` —
            # confirm against the upstream diffusers test.
            if isinstance(lowerCAmelCase_ , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        _a = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ )
        torch.manual_seed(0 )
        _a = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ )
        torch.manual_seed(0 )
        _a = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
        torch.manual_seed(0 )
        _a = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        _a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        _a = CLIPTextModel(lowerCAmelCase_ )
        _a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _a = MultiControlNetModel([controlneta, controlneta] )
        _a = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=0 ) -> Tuple:
        """Build deterministic dummy inputs with one control image per ControlNet."""
        if str(lowerCAmelCase_ ).startswith('''mps''' ):
            _a = torch.manual_seed(lowerCAmelCase_ )
        else:
            _a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
        _a = 2
        _a = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ),
        ]
        _a = floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
        _a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
        _a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
        """Varying control_guidance_start/end (scalar and per-ControlNet lists)
        must produce pairwise-different outputs."""
        _a = self.get_dummy_components()
        _a = self.pipeline_class(**lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        _a = 1_0.0
        _a = 4
        _a = self.get_dummy_inputs(lowerCAmelCase_ )
        _a = steps
        _a = scale
        _a = pipe(**lowerCAmelCase_ )[0]
        _a = self.get_dummy_inputs(lowerCAmelCase_ )
        _a = steps
        _a = scale
        _a = pipe(**lowerCAmelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        _a = self.get_dummy_inputs(lowerCAmelCase_ )
        _a = steps
        _a = scale
        _a = pipe(**lowerCAmelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        _a = self.get_dummy_inputs(lowerCAmelCase_ )
        _a = steps
        _a = scale
        _a = pipe(**lowerCAmelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3

    def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
        """Attention slicing should change outputs by at most 2e-3."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """xFormers attention should change outputs by at most 2e-3 (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def __lowerCAmelCase ( self : Dict ) -> Dict:
        """Batched single-image inference should match unbatched within 2e-3."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )

    def __lowerCAmelCase ( self : List[str] ) -> List[str]:
        """save_pretrained is not implemented for Multi-ControlNet; tolerate the
        NotImplementedError."""
        _a = self.get_dummy_components()
        _a = self.pipeline_class(**lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase_ )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """Slow GPU integration tests for the full canny-edge ControlNet img2img
    pipeline against reference outputs."""

    def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
        """Free GPU memory between tests.

        NOTE(review): this looks like a `tearDown` override (it calls
        `super().tearDown()`), but under this name unittest will never invoke it
        — confirm the intended method name.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Run SD 1.5 + canny ControlNet on a reference bird image and compare
        against a stored numpy output within 9e-2 max abs difference.

        NOTE(review): components are bound to `_a` while later lines read
        `pipe` / `prompt` / `image` / `control_image` / `output` — unbound as
        written (obfuscation damage).
        """
        _a = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        _a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ , controlnet=lowerCAmelCase_ )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        _a = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _a = '''evil space-punk bird'''
        _a = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
        _a = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
        _a = pipe(
            lowerCAmelCase_ , lowerCAmelCase_ , control_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        _a = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        _a = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 22 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    """Direct-form IIR digital filter of a given order.

    NOTE(review): the class is broken by identifier mangling — both the
    coefficient setter and the sample processor were renamed to `A`, so the
    second `def A` shadows the first and the setter is unreachable; `__init__`
    binds plain locals (`_SCREAMING_SNAKE_CASE`) instead of `self.order`,
    `self.a_coeffs`, `self.b_coeffs` and the history buffers that the methods
    below read, and it reads `order` though its parameter is `_a`. Confirm the
    intended attribute names against the method bodies.
    """

    def __init__( self : Optional[int] , _a : int ) -> None:
        '''Create an order-`_a` identity filter (a = b = [1, 0, ..., 0], empty
        input/output history).'''
        _SCREAMING_SNAKE_CASE =order
        # a_{0} ... a_{k}
        _SCREAMING_SNAKE_CASE =[1.0] + [0.0] * order
        # b_{0} ... b_{k}
        _SCREAMING_SNAKE_CASE =[1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        _SCREAMING_SNAKE_CASE =[0.0] * self.order
        # y[n-1] ... y[n-k]
        _SCREAMING_SNAKE_CASE =[0.0] * self.order

    def A ( self : List[Any] , _a : list[float] , _a : list[float] ) -> None:
        '''Set the a (feedback) and b (feedforward) coefficient lists; a missing
        a_0 is assumed to be 1.0.

        NOTE(review): the duplicate parameter name `_a` is a SyntaxError as
        written, and this definition is shadowed by the next `def A`.
        '''
        if len(_a ) < self.order:
            _SCREAMING_SNAKE_CASE =[1.0, *a_coeffs]
        if len(_a ) != self.order + 1:
            _SCREAMING_SNAKE_CASE =(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(_a )}"
            )
            raise ValueError(_a )
        if len(_a ) != self.order + 1:
            _SCREAMING_SNAKE_CASE =(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(_a )}"
            )
            raise ValueError(_a )
        _SCREAMING_SNAKE_CASE =a_coeffs
        _SCREAMING_SNAKE_CASE =b_coeffs

    def A ( self : Union[str, Any] , _a : float ) -> float:
        '''Process one input sample through the difference equation
        y[n] = (sum b_i x[n-i] - sum a_i y[n-i] + b_0 x[n]) / a_0 and update the
        input/output history buffers.'''
        _SCREAMING_SNAKE_CASE =0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        _SCREAMING_SNAKE_CASE =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        _SCREAMING_SNAKE_CASE =self.input_history[:-1]
        _SCREAMING_SNAKE_CASE =self.output_history[:-1]
        _SCREAMING_SNAKE_CASE =sample
        _SCREAMING_SNAKE_CASE =result
        return result
| 405 | 0 |
import argparse
import os
import re
# Bug fix: these module constants were all bound to the single name
# `__SCREAMING_SNAKE_CASE`, while the helper below reads `_re_indent`.
# The regex names are restored from their uses; the path constant's original
# name is not visible in this chunk — `PATH_TO_TRANSFORMERS` follows the
# upstream diffusers utility; confirm against the full file.
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")

# Backward-compatible alias: the old single name ended up bound to the last pattern.
__SCREAMING_SNAKE_CASE = _re_bracket_content
def get_indent(line):
    """Return the leading whitespace of `line` ("" when the line is blank).

    Renamed from the scrambled `SCREAMING_SNAKE_CASE__` to match its call
    sites elsewhere in this module; the parameter is restored so the body no
    longer references an undefined name.
    """
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : int="" ,lowerCAmelCase_ : Any=None ,lowerCAmelCase_ : int=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =0
SCREAMING_SNAKE_CASE_ : List[Any] =code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowercase__ ):
index += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] =['\n'.join(lines[:index] )]
else:
SCREAMING_SNAKE_CASE_ : Optional[int] =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
SCREAMING_SNAKE_CASE_ : Dict =[lines[index]]
index += 1
while index < len(lowercase__ ) and (end_prompt is None or not lines[index].startswith(lowercase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowercase__ ) )
if index < len(lowercase__ ) - 1:
SCREAMING_SNAKE_CASE_ : Dict =[lines[index + 1]]
index += 1
else:
SCREAMING_SNAKE_CASE_ : List[str] =[]
else:
blocks.append('\n'.join(lowercase__ ) )
SCREAMING_SNAKE_CASE_ : Any =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase__ ) > 0:
blocks.append('\n'.join(lowercase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a sort key so comparisons ignore case and underscores.

    Renamed to match its call site in `sort_objects`; the inner function's
    parameter is restored so it no longer references an undefined name.
    """

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects`, grouping constants first, then classes, then functions.

    Within each group, ordering is case- and underscore-insensitive.  `key`
    maps an element to the identifier string used for classification; it
    defaults to the identity.

    Restored from a scrambled version whose body referenced undefined names
    in place of the parameters.
    """

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key_fn = ignore_underscore(key)
    return sorted(constants, key=key_fn) + sorted(classes, key=key_fn) + sorted(functions, key=key_fn)
def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside its brackets sorted.

    Handles three layouts: one name per line, all names on a single bracketed
    line, and everything on one line.  Restored from a scrambled version
    whose nested `_replace` referenced an undefined `match` name.
    """

    # This inner function sorts the comma-separated names inside one `[...]`.
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Several lines, one name per line; ignore one or two wrapper lines on
        # each side (the opening `key: [` and closing `]`).
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # All names live on the single middle line.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of an `__init__.py`.

    Args:
        file: path to the `__init__.py` to inspect.
        check_only: when True, report (return True) instead of rewriting.

    Returns:
        True if the file would change and `check_only` is set; otherwise
        None (after rewriting the file in place when it changed).

    Restored from a scrambled version with duplicated parameter names and
    placeholder assignment targets.
    """
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_DIFFUSERS.

    Raises ValueError listing how many files would be rewritten when
    `check_only` is set.  The scrambled original walked an undefined name and
    *overwrote* the failure list on each hit, so at most one failure was ever
    counted; failures are now accumulated.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # CLI entry point: `--check_only` reports instead of rewriting files.
    # Restored `parser`/`args` bindings (the scrambled source assigned both to
    # one placeholder name while the following lines read `parser` and `args`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 719 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make framework RNGs deterministic so the image-diff assertions below are reproducible.
enable_full_determinism()
class lowerCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Fast, CPU-sized unit tests for ``StableDiffusionDiffEditPipeline``.

    NOTE(review): this module looks machine-mangled — the base classes are
    written as ``__A`` (undefined here; the imports suggest
    ``PipelineLatentTesterMixin``/``PipelineTesterMixin``), and locals are
    assigned to the placeholder ``SCREAMING_SNAKE_CASE_`` but read back under
    descriptive names (``unet``, ``pipe``, ``mask`` ...).  Confirm against the
    upstream diffusers test file before relying on this executing.
    """

    # Pipeline under test plus the parameter sets the shared mixins iterate over.
    _lowercase = StableDiffusionDiffEditPipeline
    _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    _lowercase = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowercase = frozenset([] )

    # Build tiny UNet/VAE/CLIP components so the pipeline runs quickly on CPU.
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : List[str] =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_zero=__UpperCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : List[str] =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : List[Any] =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
        SCREAMING_SNAKE_CASE_ : str =CLIPTextModel(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        SCREAMING_SNAKE_CASE_ : Tuple ={
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    # Inputs for the main __call__ path (mask + precomputed image latents).
    def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        SCREAMING_SNAKE_CASE_ : Any =floats_tensor((1, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        if str(__UpperCAmelCase ).startswith('mps' ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(__UpperCAmelCase )
        else:
            SCREAMING_SNAKE_CASE_ : Any =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[Any] ={
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    # Inputs for the generate_mask path (source/target prompts + RGB image).
    def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        SCREAMING_SNAKE_CASE_ : Dict =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
        if str(__UpperCAmelCase ).startswith('mps' ):
            SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(__UpperCAmelCase )
        else:
            SCREAMING_SNAKE_CASE_ : List[Any] =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] ={
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    # Inputs for the invert path (prompt + RGB image, decoded latents requested).
    def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
        if str(__UpperCAmelCase ).startswith('mps' ):
            SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase )
        else:
            SCREAMING_SNAKE_CASE_ : Tuple =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] ={
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs

    # Verify optional components stay None across save_pretrained/from_pretrained
    # and that outputs match before/after the round trip.
    def __lowerCamelCase ( self ):
        if not hasattr(self.pipeline_class , '_optional_components' ):
            return
        SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] =pipe(**__UpperCAmelCase )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(__UpperCAmelCase )
            SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class.from_pretrained(__UpperCAmelCase )
            pipe_loaded.to(__UpperCAmelCase )
            pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe_loaded(**__UpperCAmelCase )[0]
        SCREAMING_SNAKE_CASE_ : str =np.abs(output - output_loaded ).max()
        self.assertLess(__UpperCAmelCase , 1E-4 )

    # Check generate_mask produces the expected all-zero 16x16 slice.
    def __lowerCamelCase ( self ):
        SCREAMING_SNAKE_CASE_ : Optional[int] ='cpu'
        SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_mask_inputs(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(**__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        SCREAMING_SNAKE_CASE_ : str =np.array([0] * 9 )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__UpperCAmelCase , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    # Check invert() output against a golden pixel slice (DDIM inverse scheduler).
    def __lowerCamelCase ( self ):
        SCREAMING_SNAKE_CASE_ : int ='cpu'
        SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict =self.get_dummy_inversion_inputs(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.invert(**__UpperCAmelCase ).images
        SCREAMING_SNAKE_CASE_ : str =image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        SCREAMING_SNAKE_CASE_ : Tuple =np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        SCREAMING_SNAKE_CASE_ : int =np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__UpperCAmelCase , 1E-3 )

    # Relax the default batch-consistency tolerance for this pipeline.
    def __lowerCamelCase ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )

    # Same inversion check but with the DPMSolver multistep scheduler pair.
    def __lowerCamelCase ( self ):
        SCREAMING_SNAKE_CASE_ : int ='cpu'
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ : Dict ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        SCREAMING_SNAKE_CASE_ : str =DPMSolverMultistepScheduler(**__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =DPMSolverMultistepInverseScheduler(**__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inversion_inputs(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] =pipe.invert(**__UpperCAmelCase ).images
        SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests: DiffEdit on a real image vs. reference output.

    NOTE(review): locals here are assigned to the placeholder
    ``SCREAMING_SNAKE_CASE_`` but read back as ``raw_image``, ``pipe``,
    ``mask_image`` etc., and the classmethod below never stores anything on
    ``cls`` even though tests read ``self.raw_image`` — identifiers look
    machine-mangled; confirm against the upstream diffusers test file.
    """

    # Free GPU memory between tests.
    def __lowerCamelCase ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Download and cache the shared 768x768 input image once for the class.
    @classmethod
    def __lowerCamelCase ( cls ):
        SCREAMING_SNAKE_CASE_ : Tuple =load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
        SCREAMING_SNAKE_CASE_ : Any =raw_image.convert('RGB' ).resize((768, 768) )
        SCREAMING_SNAKE_CASE_ : Optional[int] =raw_image

    # Full DiffEdit round trip (DDIM schedulers) compared to a golden image.
    def __lowerCamelCase ( self ):
        SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Dict =StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE_ : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] ='a bowl of fruit'
        SCREAMING_SNAKE_CASE_ : Optional[int] ='a bowl of pears'
        SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
            image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
        SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.invert(
            prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase ).latents
        SCREAMING_SNAKE_CASE_ : List[Any] =pipe(
            prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE_ : Optional[Any] =(
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1

    # Same round trip with the DPMSolver multistep scheduler pair.
    def __lowerCamelCase ( self ):
        SCREAMING_SNAKE_CASE_ : List[str] =torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] =StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : str ='a bowl of fruit'
        SCREAMING_SNAKE_CASE_ : str ='a bowl of pears'
        SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
            image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe.invert(
            prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase , num_inference_steps=25 , ).latents
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe(
            prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE_ : List[str] =(
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 153 | 0 |
def snake_case__(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid).

    The scrambled original declared two parameters with the same name (a
    SyntaxError) and assigned the Euclid step to throwaway locals, so ``a``
    never changed and the loop never terminated.
    """
    while a != 0:
        # Classic Euclid step: (a, b) <- (b mod a, a).
        a, b = b % a, a
    return b


# The mod-inverse helper below calls this function under the name ``gcd``.
gcd = snake_case__
def snake_case__(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm.  Raises ValueError when the
    inverse does not exist (i.e. gcd(a, m) != 1).

    The scrambled original collapsed the six extended-Euclid accumulators
    into identical expressions, so it never computed a correct inverse.
    """
    from math import gcd  # local import keeps this function self-contained

    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclid: maintain the invariant u1*a + u2*m == u3 (likewise v*).
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 579 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Restored: the scrambled source assigned both the availability flag and the
# logger to the single name ``SCREAMING_SNAKE_CASE``, while the command below
# checks ``_has_cookiecutter`` — which was never defined.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
    """Build the add-new-model command object from parsed CLI arguments.

    NOTE(review): the body reads ``args`` while the parameter is named
    ``__SCREAMING_SNAKE_CASE``, and ``AddNewModelCommand`` is not defined in
    this module (the class below is named ``lowerCamelCase``).  The
    identifiers look machine-mangled — confirm against upstream transformers.
    """
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCamelCase ( lowercase__ ):
    """CLI command that scaffolds a new model from the cookiecutter templates.

    NOTE(review): this class looks machine-mangled — the base is written as
    ``lowercase__`` (undefined here; the import at the top suggests
    ``BaseTransformersCLICommand``), and many assignments target the
    placeholder ``UpperCAmelCase_`` while later lines read ``self._testing``,
    ``directory``, ``configuration`` etc.  Confirm against upstream
    transformers before relying on it executing.
    """

    # Register the `add-new-model` sub-parser and its flags on the CLI parser.
    @staticmethod
    def A__ ( lowerCAmelCase ):
        UpperCAmelCase_ = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" , type=lowerCAmelCase , help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" , type=lowerCAmelCase , help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=lowerCAmelCase )

    # Store the testing flag, optional config file and cookiecutter path.
    def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , *lowerCAmelCase ):
        UpperCAmelCase_ = testing
        UpperCAmelCase_ = testing_file
        UpperCAmelCase_ = path

    # Run the scaffolding: execute cookiecutter, then move the generated model,
    # test, doc and tokenizer files into the transformers tree.
    def A__ ( self ):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(lowerCAmelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        UpperCAmelCase_ = (
            Path(lowerCAmelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        UpperCAmelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(lowerCAmelCase ) )
        else:
            with open(self._testing_file , "r" ) as configuration_file:
                UpperCAmelCase_ = json.load(lowerCAmelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCAmelCase , extra_context=lowerCAmelCase , )
        UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" , "r" ) as configuration_file:
            UpperCAmelCase_ = json.load(lowerCAmelCase )
        UpperCAmelCase_ = configuration["lowercase_modelname"]
        UpperCAmelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'''{directory}/configuration.json''' )
        UpperCAmelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
        UpperCAmelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        UpperCAmelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
        UpperCAmelCase_ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
        os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCAmelCase )
        # Tests require submodules as they have parent imports
        with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
            pass
        shutil.move(
            f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
        shutil.move(
            f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )

        # Strip "# Copied from transformers." markers from a generated file.
        def remove_copy_lines(lowerCAmelCase ):
            with open(lowerCAmelCase , "r" ) as f:
                UpperCAmelCase_ = f.readlines()
            with open(lowerCAmelCase , "w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(lowerCAmelCase )

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
        shutil.move(
            f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
        shutil.move(
            f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
        shutil.move(
            f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        # Copy `fname` to a temp file, inserting `lines_to_copy` below the
        # marker line, then atomically move the temp file back over `fname`.
        def replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
            # Create temp file
            UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
            UpperCAmelCase_ = False
            with fdopen(lowerCAmelCase , "w" ) as new_file:
                with open(lowerCAmelCase ) as old_file:
                    for line in old_file:
                        new_file.write(lowerCAmelCase )
                        if line_to_copy_below in line:
                            UpperCAmelCase_ = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(lowerCAmelCase )
            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
            # Copy the file permissions from the old file to the new file
            copymode(lowerCAmelCase , lowerCAmelCase )
            # Remove original file
            remove(lowerCAmelCase )
            # Move new file
            move(lowerCAmelCase , lowerCAmelCase )

        # Decide whether a snippet targets a framework the user skipped.
        def skip_units(lowerCAmelCase ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        # Parse a `to_replace_*` template and apply each snippet to its target file.
        def replace_in_files(lowerCAmelCase ):
            with open(lowerCAmelCase ) as datafile:
                UpperCAmelCase_ = []
                UpperCAmelCase_ = False
                UpperCAmelCase_ = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        UpperCAmelCase_ = line.split("\"" )[1]
                        UpperCAmelCase_ = skip_units(lowerCAmelCase )
                    elif "# Below: " in line and "##" not in line:
                        UpperCAmelCase_ = line.split("\"" )[1]
                        UpperCAmelCase_ = skip_units(lowerCAmelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
                        UpperCAmelCase_ = []
                    elif "# Replace with" in line and "##" not in line:
                        UpperCAmelCase_ = []
                    elif "##" not in line:
                        lines_to_copy.append(lowerCAmelCase )
            remove(lowerCAmelCase )

        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(lowerCAmelCase )
| 579 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored: the scrambled source assigned both the logger and the archive map
# to the single name ``SCREAMING_SNAKE_CASE__``, so the logger was immediately
# clobbered by the dict.  Names follow the standard HF layout — confirm the
# archive-map name against upstream transformers.
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for BioGPT models.

    Stores the hyperparameters used to instantiate a BioGPT model; the
    defaults correspond to ``microsoft/biogpt``.

    Restored from a scrambled version whose 18 parameters shared one name (a
    SyntaxError), whose base class was an undefined placeholder (the file
    imports ``PretrainedConfig`` for exactly this purpose), and whose body
    assigned every hyperparameter to a throwaway local instead of ``self``.
    Parameter names are recovered from the assignment order in the body.
    """

    # PretrainedConfig subclasses identify themselves via `model_type`.
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the hyperparameters and forward the special token ids to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 714 | """simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCAmelCase__ ( _UpperCamelCase : Namespace ) -> List[str]:
    """argparse factory: build the training command object from parsed CLI args.

    Bug fix: the original returned `TrainCommand(...)`, a name that does not
    exist in this module — the command class below was renamed to
    `lowerCAmelCase_` by the obfuscation.
    """
    return lowerCAmelCase_(_UpperCamelCase)
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
@staticmethod
def snake_case ( lowerCAmelCase ):
"""simple docstring"""
snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=lowerCAmelCase , required=lowerCAmelCase , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=lowerCAmelCase , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=lowerCAmelCase , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=lowerCAmelCase , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=lowerCAmelCase , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=lowerCAmelCase , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=lowerCAmelCase , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=lowerCAmelCase , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=lowerCAmelCase , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=lowerCAmelCase , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=lowerCAmelCase , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=lowerCAmelCase , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=lowerCAmelCase , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = logging.get_logger('transformers-cli/training' )
snake_case = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=lowerCAmelCase )
snake_case = args.output
snake_case = args.column_label
snake_case = args.column_text
snake_case = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
snake_case = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
snake_case = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
snake_case = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case = args.validation_split
snake_case = args.train_batch_size
snake_case = args.valid_batch_size
snake_case = args.learning_rate
snake_case = args.adam_epsilon
def snake_case ( self ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case ( self ):
"""simple docstring"""
raise NotImplementedError
def snake_case ( self ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 104 | 0 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = [], []
while len(SCREAMING_SNAKE_CASE ) > 1:
lowercase__ , lowercase__ = min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE )
start.append(SCREAMING_SNAKE_CASE )
end.append(SCREAMING_SNAKE_CASE )
collection.remove(SCREAMING_SNAKE_CASE )
collection.remove(SCREAMING_SNAKE_CASE )
end.reverse()
return start + collection + end
if __name__ == "__main__":
    # Bug fixes: both inputs were bound to `lowerCAmelCase` while the code
    # referenced `user_input` / `unsorted`, and the sorter was called by its
    # upstream name `merge_sort` instead of its name in this module, `_a`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*_a(unsorted), sep=',')
| 43 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCAmelCase : Optional[int] = 2
class lowerCAmelCase__ :
    """Minimal re-implementation of fairseq's ``Dictionary`` used when
    converting fairseq checkpoints.

    Bug fixes vs. the obfuscated original: `load`, `add_symbol`, `_load_meta`
    and `add_from_file` had all been collapsed onto one method name (`__a`, so
    only the last survived), and every attribute assignment bound a throwaway
    local instead of `self`. Names are restored from the upstream conversion
    script so the methods can call each other again.
    """

    def __init__(self, *, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None):
        """Create a dictionary pre-seeded with the four special symbols."""
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []   # index -> token
        self.count = []     # index -> occurrence count
        self.indices = {}   # token -> index
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        # Two dictionaries are equal when they map the same tokens to the same ids.
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        # Out-of-range ids fall back to the unknown token, as in fairseq.
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a dictionary from a fairseq ``dict.txt`` path or open file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add *word* with count *n* and return its index (existing words accumulate counts)."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        """Return the index of the first dictionary entry (no metadata header here)."""
        return 0

    def add_from_file(self, f):
        """Parse ``<token> <count> [#fairseq:overwrite]`` lines from *f* (path or file)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word) )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def SCREAMING_SNAKE_CASE__ ( d ):
    """Convert a fairseq BPE vocab dict to Hugging Face conventions.

    fairseq marks word *continuations* with a trailing ``@@``; HF marks word
    *endings* with ``</w>``. So ``"word@@" -> "word"`` and ``"word" ->
    "word</w>"``, except for the four special tokens, which keep their names.

    Bug fix: the obfuscated original named its parameter `snake_case` but read
    `d`, and bound the result to a throwaway name while deleting from `da`.
    """
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint dump into a Hugging Face model dump.

    Writes ``vocab.json``, ``merges.txt``, ``config.json``, the tokenizer
    config and the PyTorch weights into *pytorch_dump_folder_path*.

    Bug fixes vs. the obfuscated original: both parameters shared the name
    `snake_case` (a SyntaxError) and every local was collapsed onto
    `snake_case`/`UpperCAmelCase__`. The function is also given back the name
    that this module's own ``__main__`` block calls (the obfuscation had made
    it shadow `rewrite_dict_keys`, which shares the name
    ``SCREAMING_SNAKE_CASE__`` above).
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    # NOTE(review): `Dictionary` was mangled to `lowerCAmelCase__` in this copy.
    src_dict = lowerCAmelCase__.load(dict_file)
    # NOTE(review): `rewrite_dict_keys` is the helper defined above (mangled to
    # SCREAMING_SNAKE_CASE__, which this function previously shadowed).
    src_vocab = SCREAMING_SNAKE_CASE__(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    # The module-level json indent constant (`_lowerCAmelCase = 2`) is clobbered
    # by the __main__ block in this copy, so the value 2 is inlined here.
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # good hparam defaults to start with
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    # Rename fairseq layers to the HF BioGPT naming scheme.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
if __name__ == "__main__":
    # Bug fix: the original bound both the parser and the parsed args to the
    # throwaway name `_lowerCAmelCase` (clobbering the module json-indent
    # constant) while the code below referenced `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): `convert_biogpt_checkpoint_to_pytorch` must be the name of
    # the conversion routine above — in this obfuscated copy that routine was
    # renamed to SCREAMING_SNAKE_CASE__; confirm the restored name is in scope.
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 438 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case__ : Tuple = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
snake_case__ : Any = {"""allegro/herbert-base-cased""": 5_1_4}
snake_case__ : Dict = {}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = HerbertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase="</s>" , **_UpperCAmelCase , ) -> str:
super().__init__(
__A , __A , tokenizer_file=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , sep_token=__A , **__A , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.cls_token_id]
UpperCamelCase_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
UpperCamelCase_ = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 718 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class _a ( PretrainedConfig ):
    """Vision-encoder configuration for GIT (a CLIP-style ViT encoder).

    Bug fixes vs. the obfuscated original: the base class was the undefined
    name `UpperCAmelCase__` (restored to the `PretrainedConfig` imported
    above) and every `__init__` parameter shared the name `_UpperCAmelCase`
    (a SyntaxError); parameter names/defaults are reconstructed from the
    attribute assignments in the original body.
    """

    # HF config registry key (mangled to `A_` in the original).
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """Store the vision-transformer hyper-parameters."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def _UpperCAmelCase(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a pretrained checkpoint, unwrapping a parent GIT config.

        NOTE(review): in upstream `transformers` this override is named
        `from_pretrained`; the mangled name is kept here to preserve the
        class's visible interface.
        """
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == "git":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            # The module-level logger was mangled to `snake_case__`, so fetch a
            # logger directly here.
            logging.get_logger(__name__).warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """git"""
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=6 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=101 , _UpperCAmelCase=102 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
if vision_config is None:
UpperCamelCase_ = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
UpperCamelCase_ = GitVisionConfig(**_UpperCAmelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = tie_word_embeddings
UpperCamelCase_ = num_image_with_embedding
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
| 618 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
UpperCamelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def lowerCamelCase ( ):
    """Project Euler 42: count the "triangle words" in ``words.txt``.

    A word's value is the sum of its letters' alphabetical positions; a word
    is a triangle word when that value is a triangular number (precomputed in
    ``TRIANGULAR_NUMBERS`` above).

    Bug fix: the obfuscated original passed the undefined name `_snake_case`
    everywhere; the intended anchors are `__file__` and the locals below.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    # The file is a single line of comma-separated, double-quoted words.
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    # ord('A') == 65, so subtracting 64 gives 1-based letter positions.
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    # Bug fix: the original printed `solution()`, a name that does not exist in
    # this module — the solver above is named `lowerCamelCase`.
    print(lowerCamelCase())
| 110 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110 | 1 |
import argparse
import struct
import unittest
class _lowercase :
def __init__( self : Union[str, Any] , lowerCamelCase__ : bytes ) -> None:
"""simple docstring"""
A_ = data
# Initialize hash values
A_ = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
# Initialize round constants
A_ = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
A_ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase ( lowerCamelCase__ : bytes ) -> bytes:
"""simple docstring"""
A_ = b'''\x80''' + (b'''\x00''' * (6_3 - (len(lowerCamelCase__ ) + 8) % 6_4))
A_ = struct.pack('''>Q''' , (len(lowerCamelCase__ ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase ( self : List[Any] ) -> None:
"""simple docstring"""
A_ = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A_ = list(struct.unpack('''>16L''' , lowerCamelCase__ ) )
# add 48 0-ed integers
words += [0] * 4_8
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
A_ = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
A_ = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
A_ = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0X1_00_00_00_00
# Compression
A_ = self.ror(lowerCamelCase__ , 6 ) ^ self.ror(lowerCamelCase__ , 1_1 ) ^ self.ror(lowerCamelCase__ , 2_5 )
A_ = (e & f) ^ ((~e & 0Xff_ff_ff_ff) & g)
A_ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_00_00_00_00
A_ = self.ror(lowerCamelCase__ , 2 ) ^ self.ror(lowerCamelCase__ , 1_3 ) ^ self.ror(lowerCamelCase__ , 2_2 )
A_ = (a & b) ^ (a & c) ^ (b & c)
A_ = (sa + maj) % 0X1_00_00_00_00
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ = (
g,
f,
e,
((d + tempa) % 0X1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0X1_00_00_00_00),
)
A_ = [a, b, c, d, e, f, g, h]
# Modify final values
A_ = [
((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
A_ = ''''''.join([hex(lowerCamelCase__ )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> int:
"""simple docstring"""
return 0Xff_ff_ff_ff & (value << (3_2 - rotations)) | (value >> rotations)
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self : str ) -> None:
"""simple docstring"""
import hashlib
A_ = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(lowerCamelCase__ ).hash , hashlib.shaaaa(lowerCamelCase__ ).hexdigest() )
def _lowerCamelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
A_ = argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
A_ = parser.parse_args()
A_ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
A_ = f.read()
else:
A_ = bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
print(SHAaaa(SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
| 563 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""DeiTFeatureExtractor"""]
__lowercase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 563 | 1 |
import math
class _UpperCAmelCase :
"""simple docstring"""
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : list[list[float]] , lowerCAmelCase_ : list[int] ) -> int:
__lowerCAmelCase = 0.0
__lowerCAmelCase = 0.0
for i in range(len(lowerCAmelCase_ ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowercase ( self : str , lowerCAmelCase_ : list[list[int | float]] , lowerCAmelCase_ : list[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : float ) -> list[list[int | float]]:
for i in range(len(lowerCAmelCase_ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def a_ ( ):
    """Train the two-cluster SOM on four toy samples, then classify a test sample.

    Bug fixes vs. the obfuscated original: it instantiated the undefined name
    `SelfOrganizingMap` (the class above is `_UpperCAmelCase`) and looped over
    the undefined name `lowerCAmelCase_`; all locals were collapsed onto
    `__lowerCAmelCase`.
    """
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = _UpperCAmelCase()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    test_sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, test_sample)
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""")
    print(F"""Weights that have been trained : {weights}""")
# running the main() function
if __name__ == "__main__":
    # Bug fix: the original called `main()`, which does not exist in this
    # module — the entry point above is named `a_`.
    a_()
import argparse
import json

from tqdm import tqdm


def main():
    """Split raw DPR training data into an evaluation-set file (one question
    per line) and a gold-data file (tab-separated positive-context titles).

    NOTE(review): the guard calls ``main()`` but the obfuscated function was
    named ``lowerCamelCase`` and used undefined ``SCREAMING_SNAKE_CASE``
    placeholders for ``type=``/``json.load``/``tqdm`` arguments; restored.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# NOTE(review): this flag was assigned to the throwaway name `a` while the
# rest of the file reads `is_python_no_less_than_3_10`; restored the name.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)

# Backward-compatible alias for the obfuscated original name.
a = is_python_no_less_than_3_10
def list_field(default=None, metadata=None):
    """Dataclass helper: a field whose default is supplied via a factory.

    A ``default_factory`` is required because mutable defaults (lists) are
    not allowed directly on dataclass fields.

    NOTE(review): the obfuscated original declared the same parameter name
    twice (a SyntaxError) and was never callable; restored the name that the
    dataclasses below actually reference.
    """
    return field(default_factory=lambda: default, metadata=metadata)


# Backward-compatible alias for the obfuscated original name.
UpperCamelCase_ = list_field
@dataclass
class BasicExample:
    """Four required fields covering the basic primitive types.

    NOTE(review): the test methods below construct ``BasicExample``; the
    obfuscated class name and duplicated field names are restored here.
    """

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """Fields with a plain default and a ``field(...)``-based default.

    NOTE(review): the obfuscated version gave both fields the same name, so
    only one survived; restored distinct field names.
    """

    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    """Boolean fields with ``False``/``True``/``None`` defaults.

    NOTE(review): duplicated field names restored to distinct ones; the test
    class below references this fixture by the name ``WithDefaultBoolExample``.
    """

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """Two string-valued choices.

    NOTE(review): the obfuscated base class ``snake_case__`` was an undefined
    name; this fixture is referenced elsewhere as ``BasicEnum`` — restored.
    """

    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum):
    """Choices mixing string and integer values (exercises the custom
    ``make_choice_type_function`` path in ``HfArgumentParser``)."""

    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    """A ``BasicEnum`` field defaulting to its string value; ``__post_init__``
    coerces the string into the enum member.

    NOTE(review): the obfuscated method name ``SCREAMING_SNAKE_CASE__`` broke
    the dataclass ``__post_init__`` hook; restored.
    """

    foo: BasicEnum = "toto"

    def __post_init__(self):
        # Accept the raw string and normalise it to the enum member.
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """A ``MixedTypeEnum`` field defaulting to its string value; coerced to
    the enum member in ``__post_init__``."""

    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        # Accept the raw value and normalise it to the enum member.
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """All-optional fields (scalars and lists), every one defaulting to
    ``None`` or an empty list.

    NOTE(review): duplicated field names and the undefined ``snake_case__``
    default restored; the test class references ``OptionalExample``.
    """

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """List-typed fields with empty and non-empty defaults via ``list_field``."""

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """Required (default-less) fields, including an enum coerced in
    ``__post_init__``."""

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        # Accept the raw string and normalise it to the enum member.
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """Fields annotated with *string* type annotations (forward references),
    which ``HfArgumentParser`` must resolve like real annotations."""

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    # PEP 604 `X | None` syntax only parses at runtime on Python 3.10+,
    # hence the version guard around these two fixtures.

    @dataclass
    class WithDefaultBoolExamplePep604:
        """PEP 604 (``X | None``) variant of ``WithDefaultBoolExample``."""

        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        """PEP 604 (``X | None``) variant of ``OptionalExample``."""

        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class UpperCAmelCase_ (unittest.TestCase ):
    """Tests for ``HfArgumentParser``: building argparse parsers from dataclass
    fixtures and parsing args / dicts / JSON / YAML back into instances.

    NOTE(review): mechanical obfuscation collapsed distinct argument and local
    names into ``_UpperCAmelCase`` / ``_lowerCAmelCase`` and renamed every test
    method to ``SCREAMING_SNAKE_CASE__``; many references below (``a``, ``b``,
    ``xx``, ``yy``, ``parser``, ``expected`` …) are therefore unresolved at
    runtime and need the original names restored before this suite can run.
    Code is kept byte-identical here; only documentation was added.
    """
    def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: argparse.ArgumentParser , _UpperCAmelCase: argparse.ArgumentParser ):
        """Assert that two argparse parsers define equivalent sets of actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            _lowerCAmelCase :Tuple = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'}
            _lowerCAmelCase :Union[str, Any] = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices' , _UpperCAmelCase ) and yy.get('choices' , _UpperCAmelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](_UpperCAmelCase ) , yy['type'](_UpperCAmelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        """Basic required int/float/str/bool arguments parse positionally."""
        _lowerCAmelCase :str = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Any = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :List[Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (_lowerCAmelCase ) :List[str] = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase )
        self.assertFalse(example.flag )
    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        """Fields with defaults become optional arguments with those defaults."""
        _lowerCAmelCase :List[str] = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase )
        expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        """Boolean fields get --flag / --no_flag style handling."""
        _lowerCAmelCase :str = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
        expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz' )
        expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase )
        _lowerCAmelCase :List[str] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_UpperCAmelCase )
        for dataclass_type in dataclass_types:
            _lowerCAmelCase :Optional[Any] = HfArgumentParser(_UpperCAmelCase )
            self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
            _lowerCAmelCase :Any = parser.parse_args([] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
            _lowerCAmelCase :Optional[int] = parser.parse_args(['--foo', '--no_baz'] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
            _lowerCAmelCase :List[Any] = parser.parse_args(['--foo', '--baz'] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
            _lowerCAmelCase :int = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
            _lowerCAmelCase :Dict = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) )
    def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        """Enum fields accept any of the enum's values, mixed types included."""
        _lowerCAmelCase :Any = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Tuple = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :Tuple = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        _lowerCAmelCase :List[Any] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        _lowerCAmelCase :int = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        _lowerCAmelCase :Tuple = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        _lowerCAmelCase :str = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
        _lowerCAmelCase :List[Any] = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """``Literal[...]`` annotations behave like mixed-type enum choices."""
        @dataclass
        class UpperCAmelCase_ :
            """Inline fixture: a Literal-typed field with a string default."""
            lowerCamelCase : Literal["titi", "toto", 42] = "toto"
        _lowerCAmelCase :List[Any] = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :str = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :List[Any] = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        _lowerCAmelCase :int = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        _lowerCAmelCase :List[str] = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """List fields map to nargs='+' arguments with list defaults."""
        _lowerCAmelCase :Dict = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :int = argparse.ArgumentParser()
        expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase )
        expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
        expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :str = parser.parse_args([] )
        self.assertEqual(
            _UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
        _lowerCAmelCase :List[str] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
        self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
    def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
        """Optional fields default to None / empty lists; PEP 604 variant too."""
        _lowerCAmelCase :Tuple = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase )
        expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message' )
        expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase )
        expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase )
        expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase )
        _lowerCAmelCase :List[Any] = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_UpperCAmelCase )
        for dataclass_type in dataclass_types:
            _lowerCAmelCase :Tuple = HfArgumentParser(_UpperCAmelCase )
            self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
            _lowerCAmelCase :Any = parser.parse_args([] )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) )
            _lowerCAmelCase :List[str] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
            self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.1_4 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        """Default-less fields become required arguments."""
        _lowerCAmelCase :Dict = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: int ):
        """String (forward-reference) annotations resolve like real ones."""
        _lowerCAmelCase :str = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :int = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
        expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase )
        expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
        self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        """parse_dict builds a dataclass instance from a plain dict."""
        _lowerCAmelCase :Any = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Union[str, Any] = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        _lowerCAmelCase :Optional[int] = parser.parse_dict(_UpperCAmelCase )[0]
        _lowerCAmelCase :Optional[int] = BasicExample(**_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """parse_dict raises on unknown keys when allow_extra_keys is False."""
        _lowerCAmelCase :List[str] = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Optional[Any] = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }
        self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: int ):
        """parse_yaml_file also accepts a JSON file on disk."""
        _lowerCAmelCase :Union[str, Any] = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Any = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCAmelCase :Any = os.path.join(_UpperCAmelCase , 'temp_json' )
            os.mkdir(_UpperCAmelCase )
            with open(temp_local_path + '.json' , 'w+' ) as f:
                json.dump(_UpperCAmelCase , _UpperCAmelCase )
            _lowerCAmelCase :List[str] = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
            _lowerCAmelCase :str = BasicExample(**_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """parse_yaml_file loads a YAML file into a dataclass instance."""
        _lowerCAmelCase :Optional[int] = HfArgumentParser(_UpperCAmelCase )
        _lowerCAmelCase :Tuple = {
            'foo': 12,
            'bar': 3.1_4,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCAmelCase :List[str] = os.path.join(_UpperCAmelCase , 'temp_yaml' )
            os.mkdir(_UpperCAmelCase )
            with open(temp_local_path + '.yaml' , 'w+' ) as f:
                yaml.dump(_UpperCAmelCase , _UpperCAmelCase )
            _lowerCAmelCase :Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
            _lowerCAmelCase :Union[str, Any] = BasicExample(**_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        """Smoke test: HfArgumentParser accepts TrainingArguments."""
        _lowerCAmelCase :Dict = HfArgumentParser(_UpperCAmelCase )
        self.assertIsNotNone(_UpperCAmelCase )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the MobileViT model family.
# NOTE(review): the obfuscated version assigned everything to a throwaway
# name `a` and then referenced the undefined `_import_structure`; restored
# the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import baseaa
def base85_encode(string: str) -> bytes:
    """Encode *string* (UTF-8) with Ascii85 and return the encoded bytes.

    NOTE(review): the obfuscated version called the nonexistent module
    ``baseaa`` and referenced an undefined parameter name; restored to the
    stdlib ``base64.a85encode``, imported locally because the module-level
    ``import baseaa`` line is broken.
    """
    import base64

    return base64.a85encode(string.encode('utf-8'))
def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back into a UTF-8 string.

    NOTE(review): the obfuscated original shared the name ``a_`` with the
    encoder above (shadowing it) and called the nonexistent ``baseaa``
    module; restored to the stdlib ``base64.a85decode``.
    """
    import base64

    return base64.a85decode(a85encoded).decode('utf-8')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate a polynomial at ``x0`` using Neville's
    iterated interpolation.

    Returns ``[value, q]`` where ``value`` is the interpolated result and
    ``q`` is the full Neville table.

    NOTE(review): the obfuscated version declared the same parameter name
    three times (a SyntaxError) and dropped the ``q[i][1]`` table
    initialisation into a throwaway local; restored.

    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    # q[j][i] holds the interpolant of degree i-1 through points j-i+1 .. j.
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def counting_sort(collection: list) -> list:
    """Sort a list of integers (negatives allowed) with counting sort.

    Returns a new, stably sorted list; the input is left unmodified.

    NOTE(review): the obfuscated function name did not match the in-file
    callers (``counting_sort``), and the output-placement assignment was
    dropped into a throwaway local; restored.
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def counting_sort_string(string: str) -> str:
    """Sort the characters of *string* by code point using ``counting_sort``."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"

    # NOTE(review): the obfuscated version stored the input under one name
    # and read it back under another (NameError); restored.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the ``diffusers-cli`` console script.

    Builds the top-level argument parser, registers subcommands, dispatches
    to the selected command's service object, and runs it.

    NOTE(review): the guard calls ``main()`` but the obfuscated function had
    a different name and a wrong ``-> str`` annotation (it returns None);
    restored.
    """
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
def one_pence() -> int:
    """Ways to make any non-negative amount using only 1p coins: always 1."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make *x* pence using coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make *x* pence using coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make *x* pence using coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make *x* pence using coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make *x* pence using coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make *x* pence using coins of at most 100p (one pound)."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make *x* pence using all standard UK coins up to 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Project Euler 31: number of ways to make *n* pence from UK coins.

    NOTE(review): the obfuscation named every function in this mutually
    recursive chain ``A``, so each call target was undefined; restored the
    coin-named functions it references.
    """
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: 1 only when both inputs are truthy (non-zero).

    NOTE(review): the obfuscated version declared the same parameter name
    twice (a SyntaxError) and the guard called undefined names; restored.
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the rat-in-a-maze problem for a square ``maze``.

    ``maze[i][j]`` truthy means the cell is blocked. Prints the solution
    path matrix (or a failure message) and returns whether a path exists.

    NOTE(review): the obfuscated function shared its name with ``run_maze``
    below (so it was shadowed); restored distinct names.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search step for the rat-in-a-maze solver.

    Marks cells of the current path in ``solutions`` (1 = on path) and
    backtracks when a branch dead-ends. Returns True when a path from
    ``(i, j)`` to the bottom-right corner exists.

    NOTE(review): the obfuscated version declared the same parameter name
    four times (a SyntaxError); restored.
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the TimeSformer model family.
# NOTE(review): the obfuscated version assigned the structure to a throwaway
# name while `_LazyModule` received the undefined `_import_structure`;
# restored the standard transformers lazy-module pattern.
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
# NOTE(review): the obfuscation named every constant `_A`, so each shadowed
# the previous one and the class below referenced undefined names
# (`VOCAB_FILES_NAMES`, `SPIECE_UNDERLINE`, ...); restored the real names.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's meta-symbol marking the start of a word.
SPIECE_UNDERLINE = "▁"
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """ALBERT tokenizer backed by a SentencePiece model (``spiece.model``).

    Supports optional lower-casing, whitespace cleanup, and accent stripping,
    and builds ALBERT's ``[CLS] A [SEP]`` / ``[CLS] A [SEP] B [SEP]`` inputs.

    NOTE(review): mechanical obfuscation collapsed distinct local names into
    ``SCREAMING_SNAKE_CASE__`` — in multi-variable methods (notably
    ``convert_tokens_to_string``) the logic as written is broken and the
    original names need restoring. Code kept byte-identical here.
    """
    lowerCamelCase__ : Dict = VOCAB_FILES_NAMES
    lowerCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , A_ , A_=True , A_=True , A_=False , A_="[CLS]" , A_="[SEP]" , A_="<unk>" , A_="[SEP]" , A_="<pad>" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ):
        """Load the SentencePiece model and record the normalisation options."""
        # Mask token behaves like a normal word: include preceding space.
        SCREAMING_SNAKE_CASE__ = (
            AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ )
            if isinstance(A_ , A_ )
            else mask_token
        )

        SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )

        SCREAMING_SNAKE_CASE__ = do_lower_case
        SCREAMING_SNAKE_CASE__ = remove_space
        SCREAMING_SNAKE_CASE__ = keep_accents
        SCREAMING_SNAKE_CASE__ = vocab_file

        SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A_ )

    @property
    def lowercase_ ( self ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )

    def lowercase_ ( self ):
        """Return the full token -> id mapping, including added tokens."""
        SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor when pickling."""
        SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
        SCREAMING_SNAKE_CASE__ = None
        return state

    def __setstate__( self , A_ ):
        """Re-create the SentencePiece processor after unpickling."""
        SCREAMING_SNAKE_CASE__ = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            SCREAMING_SNAKE_CASE__ = {}

        SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase_ ( self , A_ ):
        """Apply the configured normalisation: collapse spaces, normalise
        quote characters, optionally strip accents and lower-case."""
        if self.remove_space:
            SCREAMING_SNAKE_CASE__ = ''' '''.join(inputs.strip().split() )
        else:
            SCREAMING_SNAKE_CASE__ = inputs
        SCREAMING_SNAKE_CASE__ = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )

        if not self.keep_accents:
            SCREAMING_SNAKE_CASE__ = unicodedata.normalize('''NFKD''' , A_ )
            SCREAMING_SNAKE_CASE__ = ''''''.join([c for c in outputs if not unicodedata.combining(A_ )] )
        if self.do_lower_case:
            SCREAMING_SNAKE_CASE__ = outputs.lower()

        return outputs

    def lowercase_ ( self , A_ ):
        """Tokenize with SentencePiece, re-splitting pieces that end in a
        comma preceded by a digit (e.g. "9," -> "9" + ",")."""
        SCREAMING_SNAKE_CASE__ = self.preprocess_text(A_ )
        SCREAMING_SNAKE_CASE__ = self.sp_model.encode(A_ , out_type=A_ )
        SCREAMING_SNAKE_CASE__ = []
        for piece in pieces:
            if len(A_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                SCREAMING_SNAKE_CASE__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(A_ , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        SCREAMING_SNAKE_CASE__ = cur_pieces[1:]
                    else:
                        SCREAMING_SNAKE_CASE__ = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(A_ )
            else:
                new_pieces.append(A_ )

        return new_pieces

    def lowercase_ ( self , A_ ):
        """Map a token string to its vocabulary id."""
        return self.sp_model.PieceToId(A_ )

    def lowercase_ ( self , A_ ):
        """Map a vocabulary id back to its token string."""
        return self.sp_model.IdToPiece(A_ )

    def lowercase_ ( self , A_ ):
        """Join tokens back into a string, decoding sub-token runs with
        SentencePiece and passing special tokens through verbatim.

        NOTE(review): here the obfuscation merged ``out_string``,
        ``current_sub_tokens`` and ``prev_is_special`` into one name —
        as written this method cannot work and needs the originals restored.
        """
        SCREAMING_SNAKE_CASE__ = []
        SCREAMING_SNAKE_CASE__ = ''''''
        SCREAMING_SNAKE_CASE__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A_ ) + token
                SCREAMING_SNAKE_CASE__ = True
                SCREAMING_SNAKE_CASE__ = []
            else:
                current_sub_tokens.append(A_ )
                SCREAMING_SNAKE_CASE__ = False
        out_string += self.sp_model.decode(A_ )
        return out_string.strip()

    def lowercase_ ( self , A_ , A_ = None ):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self , A_ , A_ = None , A_ = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )

        if token_ids_a is not None:
            return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
        return [1] + ([0] * len(A_ )) + [1]

    def lowercase_ ( self , A_ , A_ = None ):
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self , A_ , A_ = None ):
        """Save the SentencePiece model file into ``save_directory``; copies
        the original file when present, else serialises the loaded model."""
        if not os.path.isdir(A_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        SCREAMING_SNAKE_CASE__ = os.path.join(
            A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A_ , '''wb''' ) as fi:
                SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
                fi.write(A_ )

        return (out_vocab_file,)
| 100 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __lowerCAmelCase ( _UpperCamelCase ):
    """Pipeline output holder: images plus an optional per-image flag list.

    NOTE(review): both annotated attributes below share the name
    ``_UpperCamelCase`` (the second annotation shadows the first) — the
    field names appear to have been mangled and should be restored.
    """

    # presumably the generated images (PIL list or ndarray) — TODO confirm
    _UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray]
    # presumably per-image boolean flags (e.g. NSFW detection) — TODO confirm
    _UpperCamelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class __lowerCAmelCase ( _UpperCamelCase ):
        """Flax pipeline output holder: an image array plus per-image flags.

        NOTE(review): both annotated attributes share the name
        ``_UpperCamelCase`` (the second shadows the first) — the field
        names appear to have been mangled and should be restored.
        """

        # presumably the generated images array — TODO confirm
        _UpperCamelCase : np.ndarray
        # presumably per-image boolean flags — TODO confirm
        _UpperCamelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 112 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information-gain) pairs and save them to ``igf_data_file``.

    The original definition named every parameter ``UpperCAmelCase__`` (a
    SyntaxError) and the def name did not match the ``generate_n_pairs(...)``
    call site below; both are restored here from the call site's keywords.
    """
    # Fixed seed so the dataset split is reproducible.
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save them to igf_data_file
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train and return the IGF secondary learner on precollected IG pairs.

    The original definition named every parameter ``UpperCAmelCase__`` (a
    SyntaxError) and its name did not match the ``training_secondary_learner``
    call site below; both are restored here from the call site's keywords.
    """
    set_seed(42)
    # Load pre-trained model whose embeddings seed the secondary learner.
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune ``model`` with Information Gain Filtration.

    Each sampled context is scored by ``secondary_learner``; only contexts
    whose predicted information gain exceeds ``threshold`` contribute to the
    accumulated batch gradient. The original definition named every parameter
    ``UpperCAmelCase__`` (a SyntaxError), its def name did not match the
    ``finetune(...)`` call site below, and the degraded body rebound a single
    local instead of the intended names; all restored here from the call site.

    Returns the fine-tuned model (also saved to ``finetuned_model_name``).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """Build the CLI and run the three IGF stages: collect IG pairs, train the
    secondary learner, then fine-tune GPT-2 with filtration.

    The original def was named ``UpperCAmelCase__`` while the guard below calls
    ``main()``, and every argparse ``default=``/``type=``/``required=`` value had
    been replaced by the same placeholder name; values here are reconstructed
    from the help strings and the literal arguments used in the calls below.
    NOTE(review): as in the original, the parsed args are never used — the
    stage calls pass hard-coded literals.
    """
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpta,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpta,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
    """Output container for the decoder below.

    NOTE(review): the single field is the literal ``lowercase = 42`` — the
    original annotated field (presumably the decoded sample tensor) appears
    to have been mangled and should be restored.
    """

    lowercase = 42
class A__ ( nn.Module ):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> norm/act -> conv_out.

    NOTE(review): every method parameter below is named ``UpperCamelCase__``
    (duplicate argument names are a SyntaxError) and the bodies read names
    such as ``layers_per_block`` / ``double_z`` that are never bound, while
    each ``A_ =`` rebinds one placeholder instead of the intended attribute.
    The signatures/assignments look mangled and need restoring; the code is
    left byte-identical here.
    """

    def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
        """Build the input conv, the configured down blocks, the mid block and
        the normalization/activation/output convolutions."""
        super().__init__()
        A_ = layers_per_block
        A_ = torch.nn.Convad(
            UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        A_ = None
        A_ = nn.ModuleList([] )
        # down
        A_ = block_out_channels[0]
        for i, down_block_type in enumerate(UpperCamelCase__ ):
            A_ = output_channel
            A_ = block_out_channels[i]
            A_ = i == len(UpperCamelCase__ ) - 1
            A_ = get_down_block(
                UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
            self.down_blocks.append(UpperCamelCase__ )
        # mid
        A_ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # out
        A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
        A_ = nn.SiLU()
        A_ = 2 * out_channels if double_z else out_channels
        A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
        A_ = False
    def snake_case_ ( self , UpperCamelCase__ ) -> str:
        """Encode the input: conv_in, down blocks and mid block — gradient-
        checkpointed while training when enabled — then norm/act/conv_out."""
        A_ = x
        A_ = self.conv_in(UpperCamelCase__ )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ ):
                def custom_forward(*UpperCamelCase__ ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            # down
            # torch >= 1.11 exposes use_reentrant on checkpoint(); older
            # versions take the positional form in the else branch.
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                # middle
                A_ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                for down_block in self.down_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
                # middle
                A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
        else:
            # down
            for down_block in self.down_blocks:
                A_ = down_block(UpperCamelCase__ )
            # middle
            A_ = self.mid_block(UpperCamelCase__ )
        # post-process
        A_ = self.conv_norm_out(UpperCamelCase__ )
        A_ = self.conv_act(UpperCamelCase__ )
        A_ = self.conv_out(UpperCamelCase__ )
        return sample
class A__ ( nn.Module ):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> norm/act -> conv_out.

    Supports an optional ``SpatialNorm`` output normalization (``norm_type ==
    "spatial"``) conditioned on latent embeddings.

    NOTE(review): as with the encoder above, all method parameters are named
    ``UpperCamelCase__`` (duplicate-argument SyntaxError) and bodies read
    never-bound names (``layers_per_block``, ``norm_type``, ...); the
    signatures/assignments appear mangled and need restoring. Code is left
    byte-identical here.
    """

    def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
        """Build conv_in, the mid block, the configured up blocks (channel
        order reversed relative to the encoder) and the output head."""
        super().__init__()
        A_ = layers_per_block
        A_ = nn.Convad(
            UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        A_ = None
        A_ = nn.ModuleList([] )
        A_ = in_channels if norm_type == """spatial""" else None
        # mid
        A_ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # up
        A_ = list(reversed(UpperCamelCase__ ) )
        A_ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(UpperCamelCase__ ):
            A_ = output_channel
            A_ = reversed_block_out_channels[i]
            A_ = i == len(UpperCamelCase__ ) - 1
            A_ = get_up_block(
                UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
            self.up_blocks.append(UpperCamelCase__ )
            A_ = output_channel
        # out
        if norm_type == "spatial":
            A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
        else:
            A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
        A_ = nn.SiLU()
        A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
        A_ = False
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
        """Decode latents: conv_in, mid block and up blocks — gradient-
        checkpointed while training when enabled — then norm/act/conv_out.
        The second argument is presumably latent embeddings for SpatialNorm
        conditioning — TODO confirm."""
        A_ = z
        A_ = self.conv_in(UpperCamelCase__ )
        A_ = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ ):
                def custom_forward(*UpperCamelCase__ ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            # torch >= 1.11 exposes use_reentrant on checkpoint().
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                A_ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                A_ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                # middle
                A_ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
                A_ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # middle
            A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
            A_ = sample.to(UpperCamelCase__ )
            # up
            for up_block in self.up_blocks:
                A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
        # post-process
        if latent_embeds is None:
            A_ = self.conv_norm_out(UpperCamelCase__ )
        else:
            A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
        A_ = self.conv_act(UpperCamelCase__ )
        A_ = self.conv_out(UpperCamelCase__ )
        return sample
class A__ ( nn.Module ):
    """Vector quantizer (VQ-VAE codebook) with optional index remapping.

    Maps continuous latents to their nearest codebook embeddings, returning
    the quantized latents, the commitment loss and the chosen indices. When
    ``remap`` is set, raw codebook indices are translated to/from a reduced
    index set loaded from the given ``.npy`` file.

    NOTE(review): ``__init__`` declares seven parameters all named
    ``UpperCamelCase__`` (duplicate-argument SyntaxError) while its body reads
    ``n_e`` / ``vq_embed_dim`` / ``beta`` / ``remap`` / ``unknown_index`` /
    ``sane_index_shape`` — the signature appears mangled and needs restoring.
    Code is left byte-identical here.
    """

    def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
        """Create the codebook embedding (uniformly initialized in
        ``[-1/n_e, 1/n_e]``) and, when remapping, the ``used`` index buffer."""
        super().__init__()
        A_ = n_e
        A_ = vq_embed_dim
        A_ = beta
        A_ = legacy
        A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        A_ = remap
        if self.remap is not None:
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            A_ = self.used.shape[0]
            A_ = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                A_ = self.re_embed
                A_ = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            A_ = n_e
        A_ = sane_index_shape
    def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
        """Translate raw codebook indices into positions within the ``used``
        subset; unknown indices map to a random (or fixed) replacement."""
        A_ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        A_ = inds.reshape(ishape[0] , -1 )
        A_ = self.used.to(UpperCamelCase__ )
        A_ = (inds[:, :, None] == used[None, None, ...]).long()
        A_ = match.argmax(-1 )
        # rows with no match in `used` are "unknown"
        A_ = match.sum(2 ) < 1
        if self.unknown_index == "random":
            A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            A_ = self.unknown_index
        return new.reshape(UpperCamelCase__ )
    def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
        """Inverse of the remapping: translate reduced indices back to the
        full codebook index space via a gather on ``used``."""
        A_ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        A_ = inds.reshape(ishape[0] , -1 )
        A_ = self.used.to(UpperCamelCase__ )
        if self.re_embed > self.used.shape[0]: # extra token
            A_ = 0 # simply set to zero
        A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
        return back.reshape(UpperCamelCase__ )
    def snake_case_ ( self , UpperCamelCase__ ) -> str:
        """Quantize ``z``: nearest-neighbour codebook lookup, commitment loss
        (``beta`` weighting depends on ``legacy``), straight-through gradient."""
        # reshape z -> (batch, height, width, channel) and flatten
        A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
        A_ = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
        A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
        A_ = None
        A_ = None
        # compute loss for embedding
        if not self.legacy:
            A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        A_ = z + (z_q - z).detach()
        # reshape back to match original input shape
        A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            A_ = self.remap_to_used(UpperCamelCase__ )
            A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
        """Look up codebook entries for given indices (un-remapping first when
        configured) and reshape to the requested (b, h, w, c) layout."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            A_ = indices.reshape(shape[0] , -1 ) # add batch axis
            A_ = self.unmap_to_all(UpperCamelCase__ )
            A_ = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        A_ = self.embedding(UpperCamelCase__ )
        if shape is not None:
            A_ = z_q.view(UpperCamelCase__ )
            # reshape back to match original input shape
            A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class A__ ( _snake_case ):
    """Diagonal Gaussian posterior over VAE latents.

    Splits ``parameters`` into mean and clamped log-variance along dim 1 and
    supports reparameterized sampling, KL divergence, negative log-likelihood
    and mode extraction. In deterministic mode the variance is forced to zero.

    NOTE(review): ``__init__`` declares two parameters both named
    ``UpperCamelCase__`` (duplicate-argument SyntaxError) while the body reads
    ``parameters`` and ``deterministic`` — the signature appears mangled and
    needs restoring. Code is left byte-identical here.
    """

    def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
        """Split mean/logvar, clamp logvar to [-30, 20], precompute std/var."""
        A_ = parameters
        A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
        A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
        A_ = deterministic
        A_ = torch.exp(0.5 * self.logvar )
        A_ = torch.exp(self.logvar )
        if self.deterministic:
            # zero std/var collapses the distribution onto its mean
            A_ = A_ = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
        """Draw a reparameterized sample ``mean + std * eps``."""
        # make sure sample is on the same device as the parameters and has same dtype
        A_ = randn_tensor(
            self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
        A_ = self.mean + self.std * sample
        return x
    def snake_case_ ( self , UpperCamelCase__=None ) -> int:
        """KL divergence to a standard normal, or to another diagonal Gaussian
        when ``other`` is given; zero in deterministic mode."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
        """Negative log-likelihood of ``sample`` summed over the given dims;
        zero in deterministic mode."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        A_ = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
    def snake_case_ ( self ) -> List[Any]:
        """Return the distribution mode (its mean)."""
        return self.mean
| 667 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
UpperCAmelCase__ : List[str] = "Usage of script: script_name <size_of_canvas:int>"
UpperCAmelCase__ : Dict = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Return a ``size`` x ``size`` grid of dead (False) cells.

    The original def was named ``A`` while the rest of the module calls
    ``create_canvas(...)``; the name is restored to match those call sites.
    """
    # Each row is a distinct list so cells can be mutated independently.
    return [[False] * size for _ in range(size)]
def seed(canvas: list[list[bool]]) -> None:
    """Randomly seed every cell of ``canvas`` in place with True/False.

    The original def was named ``A`` (the script below calls ``seed``) and its
    body assigned the random bit to a throwaway local instead of the cell,
    leaving the canvas untouched; both defects are fixed here.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            row[j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Compute one Game-of-Life generation and return it as a new grid.

    The original def was named ``A`` (the script below calls ``run``) and its
    degraded body bound the judged cell value to a throwaway local while
    reading a never-assigned ``next_gen_canvas``; the per-cell write into the
    next-generation grid is restored here.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # 3x3 neighbourhood around (r, c).  NOTE(review): at r == 0 or
            # c == 0 the ``r - 1`` slice start wraps to -1 and yields an
            # empty/odd window — border behavior should be confirmed.
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    return next_gen_canvas.tolist()
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its 3x3 neighbourhood.

    The original def was named ``A`` with two parameters both named
    ``UpperCamelCase_`` (a SyntaxError), and every ``state`` assignment was
    mangled into a throwaway local so the final ``return state`` raised
    NameError; all restored here.

    Args:
        pt: current state of the focus cell.
        neighbours: 3x3 slice of the grid centred on the focus cell.

    Returns:
        The cell's state in the next generation.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count (includes the focus cell).
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
# Script entry point: read the canvas size from argv, seed a random canvas
# and animate successive generations with matplotlib until interrupted.
#
# NOTE(review): the assignment targets below are mangled — values are bound
# to ``UpperCAmelCase__`` while later lines read ``canvas_size``, ``c``,
# ``fig``/``ax`` and ``cmap``, and the annotated tuple assignment
# ``UpperCAmelCase__ , UpperCAmelCase__ : List[str] = plt.subplots()`` is a
# SyntaxError.  The intended names need restoring before this can run.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    UpperCAmelCase__ : List[str] = int(sys.argv[1])
    # main working structure of this module.
    UpperCAmelCase__ : Optional[int] = create_canvas(canvas_size)
    seed(c)
    UpperCAmelCase__ , UpperCAmelCase__ : List[str] = plt.subplots()
    fig.show()
    UpperCAmelCase__ : Tuple = ListedColormap(["w", "k"])
    try:
        while True:
            UpperCAmelCase__ : Optional[int] = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 48 |
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place using circle sort and return it.

    Each pass compares and swaps elements mirrored around the centre of the
    current range, then recurses into both halves; passes repeat until one
    completes with no swaps.  The module script below calls ``circle_sort``
    while the original def was named ``__a`` — the public name is provided
    here, with ``__a`` kept as a backward-compatible alias.
    """
    if len(collection) < 2:
        return collection

    def _circle_pass(data: list, low: int, high: int) -> bool:
        """One circular pass over data[low..high]; True if any swap occurred."""
        if low == high:
            return False
        swapped = False
        left, right = low, high
        while left < right:
            if data[left] > data[right]:
                data[left], data[right] = data[right], data[left]
                swapped = True
            left += 1
            right -= 1
        # Odd-sized range: the pointers meet on the middle element, which is
        # compared against its right neighbour.
        if left == right and data[left] > data[right + 1]:
            data[left], data[right + 1] = data[right + 1], data[left]
            swapped = True
        mid = low + (high - low) // 2
        left_swap = _circle_pass(data, low, mid)
        right_swap = _circle_pass(data, mid + 1, high)
        return swapped or left_swap or right_swap

    while _circle_pass(collection, 0, len(collection) - 1):
        pass
    return collection


# Backward-compatible alias for the original (obfuscated) name.
__a = circle_sort
# Script entry point: read a comma-separated list from stdin and print it
# circle-sorted.
#
# NOTE(review): the assignment targets are mangled — values are bound to
# ``A_`` while the next lines read ``user_input`` and ``unsorted``, and
# ``circle_sort`` does not match the ``__a`` def above; the intended names
# need restoring before this can run.
if __name__ == "__main__":
    A_ : str = input('Enter numbers separated by a comma:\n').strip()
    A_ : List[str] = [int(item) for item in user_input.split(',')]
    print(circle_sort(unsorted))
| 303 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase : Tuple =False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __snake_case( unittest.TestCase ):
    """Fixture builder for Pix2Struct image-processor tests: holds the test
    configuration and produces the processor kwargs plus a sample image."""
    def __init__( self , __lowerCamelCase , __lowerCamelCase=7 , __lowerCamelCase=3 , __lowerCamelCase=18 , __lowerCamelCase=30 , __lowerCamelCase=400 , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=None , ):
        """Store the test configuration.

        NOTE(review): every parameter is named ``__lowerCamelCase``
        (duplicate argument names are a SyntaxError) while the body reads
        ``size``, ``parent``, ``batch_size`` etc. — the signature appears
        mangled and needs restoring against the original test file.
        """
        __A : Any = size if size is not None else {'height': 20, 'width': 20}
        __A : Any = parent
        __A : Any = batch_size
        __A : Dict = num_channels
        __A : str = image_size
        __A : List[str] = min_resolution
        __A : Dict = max_resolution
        __A : Tuple = size
        __A : str = do_normalize
        __A : Any = do_convert_rgb
        # candidate max_patches values exercised by the tests
        __A : List[str] = [512, 1024, 2048, 4096]
        __A : Optional[int] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
    def _a ( self ):
        """Return the kwargs used to construct the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def _a ( self ):
        """Download and return a fixed sample image as an RGB PIL image."""
        __A : Union[str, Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        __A : int = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('RGB' )
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the 3-channel Pix2Struct image processor."""

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        # Regression check: mean of the flattened patches of a fixed image.
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch carries its (row, col) position, hence the +2.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch carries its (row, col) position, hence the +2.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode, calling without header text must raise.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the Pix2Struct image processor fed 4-channel (RGBA) inputs.

    Distinct class name so it does not shadow the 3-channel test class above.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        # RGBA inputs are converted down to 3 encoded channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # The alpha channel is dropped, hence (num_channels - 1); +2 for the
        # per-patch (row, col) position features.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 716 | """simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Base URL for the public OpenAI Jukebox checkpoints; referenced by the
# download loop in `convert_openai_checkpoint` as `PREFIX`.
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
# Checkpoint files (relative to PREFIX) that make up each model variant.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Rename one OpenAI Jukebox state-dict key to its HF `JukeboxModel` name.

    Pure string rewriting; applied after the regex-based renames in
    `fix_jukebox_keys`. Must be named `replace_key` because that is how it is
    called there.
    """
    # Conv layers inside deep resnet blocks (`.model.1` / `.model.3`).
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename an OpenAI Jukebox state dict onto HF `JukeboxModel` parameter names.

    Args:
        state_dict: raw (pre-cleaned) checkpoint tensors, keyed by OpenAI names.
        model_state_dict: the target HF model's state dict, used to validate
            that each renamed key exists and has the expected shape.
        key_prefix: sub-module prefix inside the HF model ("vqvae" or "priors.N").
        mapping: dict updated in place with {new_key: original_key} bookkeeping.

    Returns:
        A new dict of tensors keyed by HF names, ready for `load_state_dict`.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI Jukebox checkpoints for `model_name`, rename their
    weights to HF conventions, load them into a `JukeboxModel`, and save the
    converted model (plus a key-mapping JSON) to `pytorch_dump_folder_path`.

    Returns the list of per-checkpoint renamed state dicts (priors only,
    since the vqvae dict is popped and loaded first).
    """
    # Fetch any checkpoint file that is not already cached locally.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Expand OpenAI's abbreviated suffixes before the regex renames.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First checkpoint is the VQ-VAE; the rest are priors (top prior last).
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 237 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config class below logs through this name.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase_(PretrainedConfig):
    """Configuration for ESM models (corresponds to `EsmConfig` upstream).

    The original mangled signature repeated one parameter name, which is a
    SyntaxError; parameter names restored to match the assignments below.
    """

    # `model_type` is the attribute name the PretrainedConfig machinery expects.
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested esmfold config when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings; must be named `EsmFoldConfig` because the
    config class above instantiates it by that name."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Coerce `trunk` into a TrunkConfig (default, or built from a dict).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Folding-trunk settings; must be named `TrunkConfig` because
    `EsmFoldConfig.__post_init__` instantiates it by that name."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): these two self-modulo checks are trivially true and look
        # like they were meant to compare against another dimension; kept as-is
        # to preserve upstream behavior.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Structure-module settings; must be named `StructureModuleConfig`
    because `TrunkConfig.__post_init__` instantiates it by that name."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 amino-acid vocabulary as a tuple of tokens.

    Must be named `get_default_vocab_list` because the config class above
    calls it when no `vocab_list` is supplied for a folding model.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 682 |
"""simple docstring"""
from collections import deque
class Process:
    """A schedulable process for the MLFQ simulation below.

    Must be named `Process` because the demo script instantiates it by that
    name; attributes are the exact names the MLFQ methods read and write.
    """

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int , ):
# total number of mlfq's queues
lowerCamelCase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowerCamelCase__ = time_slices
# unfinished process is in this ready_queue
lowerCamelCase__ = queue
# current time
lowerCamelCase__ = current_time
# finished process is in this sequence queue
lowerCamelCase__ = deque()
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : list[Process] ):
lowerCamelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : list[Process] ):
lowerCamelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : list[Process] ):
lowerCamelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : deque[Process] ):
return [q.burst_time for q in queue]
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : deque[Process] ):
lowerCamelCase__ = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE__ ) != 0:
lowerCamelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCamelCase__ = 0
# set the process's turnaround time because it is finished
lowerCamelCase__ = self.current_time - cp.arrival_time
# set the completion time
lowerCamelCase__ = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
lowerCamelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCamelCase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCamelCase__ = 0
# set the finish time
lowerCamelCase__ = self.current_time
# update the process' turnaround time because it is finished
lowerCamelCase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _UpperCamelCase ( self : Dict ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCamelCase__ , lowerCamelCase__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # MLFQ needs exactly one time slice per round-robin level.
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # Recreate the processes: the doctest run may have mutated them.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 510 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase_ ( height ,width ,scale_factor=8 ):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    expressed back in pixel units.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: spatial down-scaling factor of the latent space.

    Returns:
        Tuple ``(new_height, new_width)`` -- the smallest sizes that are
        divisible by ``scale_factor`` after dividing by ``scale_factor**2``.

    BUG FIX: the original bound the quotient to a mangled throwaway name and
    then incremented the undefined names ``new_height``/``new_width``
    (NameError at runtime); the canonical variables are restored here.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the height is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def lowercase_ ( pil_image ,w=512 ,h=512 ):
    """Resize a PIL image and convert it to a float CHW torch batch in [-1, 1].

    Args:
        pil_image: input ``PIL.Image.Image``.
        w: target width in pixels (default 512).
        h: target height in pixels (default 512).

    Returns:
        ``torch.Tensor`` of shape (1, 3, h, w) with values in [-1, 1].

    BUG FIX: the original declared three parameters with the same mangled
    name (a SyntaxError), discarded the resized image by rebinding every
    intermediate to one mangled name, and called ``arr.astype(np.floataa)``
    -- a non-existent numpy attribute (mangled ``float32``).
    """
    pil_image = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    # Scale uint8 [0, 255] to float [-1, 1], matching the VQ model's range.
    arr = arr.astype(np.float32 ) / 127.5 - 1
    # HWC -> CHW for torch.
    arr = np.transpose(arr ,[2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )  # add batch dimension
    return image
class lowerCAmelCase_ (UpperCAmelCase__ ):
    """Kandinsky 2.2 image-to-image decoder pipeline.

    Wraps a conditional UNet, a DDPM-style scheduler and a MoVQ VQ
    autoencoder.  NOTE(review): the class/base/method names look
    machine-mangled (every method is named ``__magic_name__``, so later
    definitions shadow earlier ones on the class); upstream this is
    ``KandinskyV22Img2ImgPipeline(DiffusionPipeline)`` -- confirm before use.
    """

    def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
        """Register the unet, scheduler and movq sub-modules."""
        super().__init__()
        self.register_modules(
            unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , )
        # Spatial scale factor implied by the depth of the movq encoder.
        SCREAMING_SNAKE_CASE__ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        """Clip the timestep schedule for img2img: keep only the final
        ``num_inference_steps * strength`` steps of the schedule."""
        SCREAMING_SNAKE_CASE__ : int = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : Optional[int] = max(num_inference_steps - init_timestep , 0 )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Any:
        """Encode the input image into movq latents and add scheduler noise at
        the starting timestep (the img2img `prepare_latents`)."""
        if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}''' )
        SCREAMING_SNAKE_CASE__ : str = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : Tuple = batch_size * num_images_per_prompt
        # A 4-channel tensor is assumed to already be latents -- presumably;
        # NOTE(review): confirm this convention against upstream diffusers.
        if image.shape[1] == 4:
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = image
        else:
            if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
                raise ValueError(
                    F'''You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch'''
                    F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
            elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
                # One generator per sample: encode each image slice separately.
                SCREAMING_SNAKE_CASE__ : List[Any] = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase__ )
                ]
                SCREAMING_SNAKE_CASE__ : str = torch.cat(lowerCamelCase__ , dim=0 )
            else:
                SCREAMING_SNAKE_CASE__ : List[str] = self.movq.encode(lowerCamelCase__ ).latent_dist.sample(lowerCamelCase__ )
            SCREAMING_SNAKE_CASE__ : List[str] = self.movq.config.scaling_factor * init_latents
        SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([init_latents] , dim=0 )
        SCREAMING_SNAKE_CASE__ : int = init_latents.shape
        SCREAMING_SNAKE_CASE__ : List[Any] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
        # get latents
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : Dict = init_latents
        return latents

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__=0 ) -> str:
        """Sequential CPU offload of unet and movq via accelerate's
        ``cpu_offload`` (weights live on GPU only while each module runs)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        SCREAMING_SNAKE_CASE__ : List[str] = torch.device(F'''cuda:{gpu_id}''' )
        SCREAMING_SNAKE_CASE__ : Tuple = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCamelCase__ , lowerCamelCase__ )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
        """Model-level CPU offload via ``cpu_offload_with_hook`` (requires
        accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        SCREAMING_SNAKE_CASE__ : Dict = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            SCREAMING_SNAKE_CASE__ : Tuple = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ )
        # We'll offload the last model manually.
        SCREAMING_SNAKE_CASE__ : Optional[Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __magic_name__ (self ) -> Optional[Any]:
        """Device the pipeline actually executes on, accounting for
        accelerate hooks that keep parameters offloaded on CPU."""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCamelCase__ , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(lowerCamelCase__ )
    def __call__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 5_12 , SCREAMING_SNAKE_CASE__ = 5_12 , SCREAMING_SNAKE_CASE__ = 1_00 , SCREAMING_SNAKE_CASE__ = 4.0 , SCREAMING_SNAKE_CASE__ = 0.3 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ) -> str:
        """Run the img2img denoising loop: encode the image, noise it at the
        strength-clipped start timestep, denoise with (optional) classifier-free
        guidance, then decode with movq and post-process to the requested
        output type."""
        SCREAMING_SNAKE_CASE__ : Any = self._execution_device
        SCREAMING_SNAKE_CASE__ : str = guidance_scale > 1.0
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(lowerCamelCase__ , dim=0 )
        SCREAMING_SNAKE_CASE__ : Any = image_embeds.shape[0]
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(lowerCamelCase__ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeds per requested image and stack [uncond, cond].
            SCREAMING_SNAKE_CASE__ : str = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
            SCREAMING_SNAKE_CASE__ : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
            SCREAMING_SNAKE_CASE__ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
        if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            SCREAMING_SNAKE_CASE__ : List[Any] = [image]
        if not all(isinstance(lowerCamelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'''Input is in incorrect format: {[type(lowerCamelCase__ ) for i in image]}. Currently, we only support  PIL image and pytorch tensor''' )
        SCREAMING_SNAKE_CASE__ : Dict = torch.cat([prepare_image(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for i in image] , dim=0 )
        SCREAMING_SNAKE_CASE__ : int = image.to(dtype=image_embeds.dtype , device=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : Dict = self.movq.encode(lowerCamelCase__ )["latents"]
        SCREAMING_SNAKE_CASE__ : List[str] = latents.repeat_interleave(lowerCamelCase__ , dim=0 )
        self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        SCREAMING_SNAKE_CASE__ : List[str] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = downscale_height_and_width(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor )
        SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_latents(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ )
        for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"image_embeds": image_embeds}
            SCREAMING_SNAKE_CASE__ : Any = self.unet(
                sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise / variance, then combine the uncond and
                # cond noise predictions with the guidance scale.
                SCREAMING_SNAKE_CASE__ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
                SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred.chunk(2 )
                SCREAMING_SNAKE_CASE__ : Tuple = variance_pred.chunk(2 )
                SCREAMING_SNAKE_CASE__ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                SCREAMING_SNAKE_CASE__ : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                SCREAMING_SNAKE_CASE__ : int = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE__ : int = self.scheduler.step(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , )[0]
        # post-processing
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] back to [0, 1] and move to numpy HWC layout.
            SCREAMING_SNAKE_CASE__ : Tuple = image * 0.5 + 0.5
            SCREAMING_SNAKE_CASE__ : List[Any] = image.clamp(0 , 1 )
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE__ : Tuple = self.numpy_to_pil(lowerCamelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCamelCase__ )
| 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the Mask2Former subpackage.
# NOTE(review): names here look machine-mangled.  Upstream the dict below is
# `_import_structure`, and the final line installs the lazy module with
# `sys.modules[__name__] = _LazyModule(...)`.  As written, `_import_structure`
# is undefined where it is used -- confirm against the original file.
UpperCAmelCase__ : Tuple = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: skip exporting the image processor.
    pass
else:
    UpperCAmelCase__ : str = ['Mask2FormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: skip exporting the modeling classes.
    pass
else:
    UpperCAmelCase__ : Any = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers import everything eagerly.
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for loading Parquet files.

    NOTE(review): names are machine-mangled -- upstream this is
    `ParquetConfig` with the fields `batch_size`, `columns`, `features`.
    As written, all three class attributes rebind the same mangled name, so
    only the last value survives; confirm against the original file.
    """

    lowerCAmelCase_ = 1_00_00  # rows per generated Arrow batch (upstream: batch_size)
    lowerCAmelCase_ = None  # optional subset of columns to read (upstream: columns)
    lowerCAmelCase_ = None  # optional explicit Features schema (upstream: features)
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads Parquet files.

    NOTE(review): the class name is machine-mangled (it also collides with the
    config class above); upstream this is `Parquet(datasets.ArrowBasedBuilder)`
    and `ParquetConfig` is its BUILDER_CONFIG_CLASS.
    """

    lowerCAmelCase_ = ParquetConfig  # builder config class (mangled attribute name)

    def UpperCAmelCase__ ( self : Any ):
        """Return DatasetInfo built from the (optionally configured) features."""
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
        """Resolve data files and build one SplitGenerator per split.

        When no explicit features are configured, the schema is inferred from
        the first Parquet file of each split.
        """
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_A , (str, list, tuple) ):
            # A bare path / list of paths: everything goes into the train split.
            __SCREAMING_SNAKE_CASE : Tuple = data_files
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __SCREAMING_SNAKE_CASE : int = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                __SCREAMING_SNAKE_CASE : Any = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(_A ):
                    with open(_A , '''rb''' ) as f:
                        __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
                    break
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCAmelCase__ ( self : str , _A : pa.Table ):
        """Cast an Arrow table to the builder's feature schema, if one is set."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
        return pa_table

    def UpperCAmelCase__ ( self : Tuple , _A : str ):
        """Yield ``(key, pa.Table)`` batches read from each Parquet file."""
        __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column subset must match the configured features exactly.
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(_A , '''rb''' ) as f:
                __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
                    raise
| 74 |
def _lowercase ( i ,wt ,val ,j ):
    """Memoized (top-down) 0/1 knapsack.

    Args:
        i: number of items considered (items are 1-indexed via ``wt[i - 1]``).
        wt: item weights.
        val: item values, parallel to ``wt``.
        j: remaining knapsack capacity.

    Returns:
        Best achievable value using the first ``i`` items with capacity ``j``.

    BUG FIX: the original declared four parameters with the same mangled name
    (a SyntaxError), never wrote the result back into the memo table, and
    recursed via the undefined name ``mf_knapsack`` (this function is defined
    as ``_lowercase``); the recursion is now self-referential.
    """
    global f  # a global dp table for knapsack; f[i][j] < 0 means "not computed yet"
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i does not fit: value is whatever the first i-1 items give.
            val_ = _lowercase(i - 1 ,wt ,val ,j )
        else:
            # Best of skipping item i vs. taking it.
            val_ = max(
                _lowercase(i - 1 ,wt ,val ,j ) ,
                _lowercase(i - 1 ,wt ,val ,j - wt[i - 1] ) + val[i - 1] ,
            )
        f[i][j] = val_
    return f[i][j]
def _lowercase ( w ,wt ,val ,n ):
    """Solve 0/1 knapsack bottom-up.

    Args:
        w: knapsack capacity.
        wt: item weights (items are 1-indexed via ``wt[i - 1]``).
        val: item values, parallel to ``wt``.
        n: number of items.

    Returns:
        Tuple ``(best_value, dp)`` where ``dp[i][c]`` is the best value using
        the first ``i`` items with capacity ``c``.

    BUG FIX: the original returned ``dp[n][w_]``, relying on the inner loop
    variable leaking out of the loop -- a NameError whenever ``w == 0``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 ,n + 1 ):
        for w_ in range(1 ,w + 1 ):
            if wt[i - 1] <= w_:
                # Take item i (value + best of remaining capacity) or skip it.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] ,dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def _lowercase ( __lowerCamelCase : int ,__lowerCamelCase : list ,__lowerCamelCase : list ) -> List[Any]:
    """Validate inputs, solve 0/1 knapsack, and return one optimal item set.

    NOTE(review): the parameter names are machine-mangled duplicates (upstream
    this is ``knapsack_with_example_solution(w, wt, val)``), the tuple-unpack
    targets below are collapsed to one mangled name, and the calls target
    ``knapsack`` / ``_construct_solution``, which are defined above under the
    mangled name ``_lowercase``.  Confirm against the original before running.
    """
    # Both vectors must be list/tuple sequences.
    if not (isinstance(__lowerCamelCase ,(list, tuple) ) and isinstance(__lowerCamelCase ,(list, tuple) )):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )
    UpperCamelCase__ : int = len(__lowerCamelCase )
    # Weights and values must be parallel (same length).
    if num_items != len(__lowerCamelCase ):
        UpperCamelCase__ : int = (
            '''The number of weights must be the same as the number of values.\n'''
            F'But got {num_items} weights and {len(__lowerCamelCase )} values'
        )
        raise ValueError(__lowerCamelCase )
    for i in range(__lowerCamelCase ):
        # Every weight must be an integer.
        if not isinstance(wt[i] ,__lowerCamelCase ):
            UpperCamelCase__ : Tuple = (
                '''All weights must be integers but got weight of '''
                F'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(__lowerCamelCase )
    # Solve, then backtrack through the dp table to recover an optimal set.
    UpperCamelCase__ ,UpperCamelCase__ : Tuple = knapsack(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
    UpperCamelCase__ : set = set()
    _construct_solution(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
    return optimal_val, example_optional_set
def _lowercase ( dp ,wt ,i ,j ,optimal_set ):
    """Backtrack through a knapsack dp table, adding chosen item indices
    (1-indexed) to ``optimal_set``.

    Args:
        dp: filled table where ``dp[i][j]`` is the best value with the first
            ``i`` items and capacity ``j``.
        wt: item weights.
        i: current item index.
        j: current remaining capacity.
        optimal_set: set mutated in place with the chosen item indices.

    BUG FIX: the original declared five parameters with the same mangled name
    (a SyntaxError) and recursed via the undefined name
    ``_construct_solution`` (this function is defined as ``_lowercase``); the
    recursion is now self-referential.
    """
    # Base case: no items left or no capacity left.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i contributed nothing to the optimum: skip it.
            _lowercase(dp ,wt ,i - 1 ,j ,optimal_set )
        else:
            # Item i is part of the optimum: record it and reduce capacity.
            optimal_set.add(i )
            _lowercase(dp ,wt ,i - 1 ,j - wt[i - 1] ,optimal_set )
if __name__ == "__main__":
    # Demo for the knapsack solvers above.
    # NOTE(review): the names here are machine-mangled -- every assignment
    # rebinds `_SCREAMING_SNAKE_CASE`, and the calls target `knapsack`,
    # `mf_knapsack` and `knapsack_with_example_solution`, which are all
    # defined above under the mangled name `_lowercase`.  Upstream this is:
    #   val = [3, 2, 4, 4]; wt = [4, 3, 2, 3]; n = 4; w = 6
    # Confirm against the original file before running.
    _SCREAMING_SNAKE_CASE : Tuple = [3, 2, 4, 4]
    _SCREAMING_SNAKE_CASE : Any = [4, 3, 2, 3]
    _SCREAMING_SNAKE_CASE : int = 4
    _SCREAMING_SNAKE_CASE : int = 6
    # Memo table for mf_knapsack: row 0 is all zeros, the rest start at -1.
    _SCREAMING_SNAKE_CASE : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE : Dict = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w)) # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE : Any = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 344 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_snake_case : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A ( _a ):
    """Question-answering Trainer with quantization-aware training support.

    NOTE(review): names are machine-mangled -- the base class is the undefined
    name ``_a`` and every method is ``__lowerCAmelCase``, so later definitions
    shadow earlier ones on the class; upstream this subclasses
    ``transformers.Trainer`` as ``QuestionAnsweringTrainer``.  All the
    single-target assignments below rebind the throwaway name ``_a`` as well;
    confirm the real targets against the original file.
    """

    def __init__( self : Dict , *lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Dict ) -> Any:
        """Store eval examples, the QA post-processing hook and the
        quant-trainer arguments on top of the base Trainer setup."""
        super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
        _a = eval_examples
        _a = post_process_function
        _a = quant_trainer_args
        _a = 1_28 # default number of calibration samples

    def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Any=None ) -> Optional[int]:
        """Build a shuffled DataLoader over the calibration dataset using the
        eval batch size and the trainer's collator/loader settings."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
        _a = calib_dataset if calib_dataset is not None else self.calib_dataset
        _a = self._remove_unused_columns(lowerCAmelCase_ , description='''Calibration''' )
        return DataLoader(
            lowerCAmelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCAmelCase_ , )

    def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Dict=None ) -> int:
        """Collect quantization calibration statistics by running forward
        passes over up to ``self.calib_num`` samples, then finalize."""
        _a = self.train_dataset if calib_dataset is None else calib_dataset
        _a = self.get_calib_dataloader(lowerCAmelCase_ )
        _a = self.model
        quant_trainer.configure_model(lowerCAmelCase_ , self.quant_trainer_args , calib=lowerCAmelCase_ )
        model.eval()
        quant_trainer.enable_calibration(lowerCAmelCase_ )
        logger.info('''***** Running calibration *****''' )
        logger.info(F' Num examples = {self.calib_num}' )
        logger.info(F' Batch size = {calib_dataloader.batch_size}' )
        for step, inputs in enumerate(lowerCAmelCase_ ):
            # Prediction step
            _a , _a , _a = self.prediction_step(lowerCAmelCase_ , lowerCAmelCase_ , prediction_loss_only=lowerCAmelCase_ )
            # Stop once enough samples have been seen.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(lowerCAmelCase_ , self.quant_trainer_args )
        _a = model

    def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : str = "eval" ) -> List[Any]:
        """Evaluate with QA post-processing: run the raw eval loop with metric
        computation disabled, post-process predictions, then compute and log
        the metrics (keys prefixed with the metric key prefix)."""
        _a = self.eval_dataset if eval_dataset is None else eval_dataset
        _a = self.get_eval_dataloader(lowerCAmelCase_ )
        _a = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        _a = self.compute_metrics
        _a = None
        _a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _a = eval_loop(
                lowerCAmelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , )
        finally:
            # Always restore the metric function, even if the loop raised.
            _a = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            _a = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions )
            _a = self.compute_metrics(lowerCAmelCase_ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'{metric_key_prefix}_' ):
                    _a = metrics.pop(lowerCAmelCase_ )
            self.log(lowerCAmelCase_ )
        else:
            _a = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        _a = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
        return metrics

    def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int=None , lowerCAmelCase_ : str = "test" ) -> List[str]:
        """Predict with QA post-processing; returns a PredictionOutput whose
        metric keys are prefixed with the metric key prefix."""
        _a = self.get_test_dataloader(lowerCAmelCase_ )
        # Temporarily disable metric computation, we will do it in the loop here.
        _a = self.compute_metrics
        _a = None
        _a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _a = eval_loop(
                lowerCAmelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , )
        finally:
            _a = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        _a = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , output.predictions , '''predict''' )
        _a = self.compute_metrics(lowerCAmelCase_ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'{metric_key_prefix}_' ):
                _a = metrics.pop(lowerCAmelCase_ )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int]="./" ) -> Optional[Any]:
        """Export the quantized model to ONNX (opset 13), using one batch from
        the eval dataloader as the example input."""
        _a = self.eval_dataset
        _a = self.get_eval_dataloader(lowerCAmelCase_ )
        _a = next(iter(lowerCAmelCase_ ) )
        # saving device - to make it consistent
        _a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
        # convert to tuple
        _a = tuple(v.to(lowerCAmelCase_ ) for k, v in batch.items() )
        logger.info('''Converting model to be onnx compatible''' )
        from pytorch_quantization.nn import TensorQuantizer

        _a = True
        _a = self.model.to(lowerCAmelCase_ )
        model.eval()
        model.float()
        _a = model.module if hasattr(lowerCAmelCase_ , '''module''' ) else model
        quant_trainer.configure_model(lowerCAmelCase_ , self.quant_trainer_args )
        _a = os.path.join(lowerCAmelCase_ , '''model.onnx''' )
        logger.info(F'exporting model to {output_model_file}' )
        # Both batch size and sequence length are dynamic in the exported graph.
        _a = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , export_params=lowerCAmelCase_ , opset_version=13 , do_constant_folding=lowerCAmelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
                '''input_ids''': axes,
                '''attention_mask''': axes,
                '''token_type_ids''': axes,
                '''output_start_logits''': axes,
                '''output_end_logits''': axes,
            } , verbose=lowerCAmelCase_ , )
        logger.info('''onnx export finished''' )
| 377 |
'''simple docstring'''
# Undirected demo graph as an adjacency list (each edge appears in both
# endpoints' lists).  NOTE(review): later code refers to this as
# `demo_graph`; the name here looks machine-mangled.
_snake_case : Any = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def snake_case_ (graph : dict , start , goal ):
    """Breadth-first search for one shortest path (fewest edges).

    Args:
        graph: adjacency list mapping each node to its neighbours.
        start: node to start from.
        goal: node to reach.

    Returns:
        List of nodes from ``start`` to ``goal`` inclusive, ``[start]`` when
        start equals goal, or ``[]`` when no path exists.

    BUG FIX: the original collapsed every local (``explored``, ``queue``,
    ``path``, ``node``, ``neighbours``, ``new_path``) into one mangled name,
    leaving the later references undefined; the canonical names are restored.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def snake_case_ (graph : dict , start , target ):
    """Breadth-first search for the shortest-path distance (edge count).

    Args:
        graph: adjacency list mapping each node to its neighbours.
        start: node to start from.
        target: node to reach.

    Returns:
        Number of edges on a shortest path, ``0`` when start equals target,
        or ``-1`` when either node is missing / the target is unreachable.

    BUG FIX: the original assigned both ``dist[target]`` and
    ``dist[adjacent]`` to a mangled throwaway name, so distances were never
    recorded and the function always returned -1; the canonical dictionary
    updates are restored.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): `bfs_shortest_path`, `bfs_shortest_path_distance` and
    # `demo_graph` are the upstream names; in this file both functions are
    # defined under the mangled name `snake_case_` (the second shadows the
    # first) and the graph is bound to `_snake_case`, so these calls are
    # undefined as written.  Confirm against the original file.
    print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 377 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __UpperCAmelCase ( height , width , scale_factor=8 ):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    expressed back in pixel units.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: spatial down-scaling factor of the latent space.

    Returns:
        Tuple ``(new_height, new_width)``.

    BUG FIX: the original bound the quotient to a mangled throwaway name and
    then incremented the undefined names ``new_height``/``new_width``
    (NameError at runtime); the canonical variables are restored here.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the height is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase_ ( snake_case ):
    def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]:
        """Register the unet, scheduler and movq sub-modules and derive the
        movq spatial scale factor from the encoder depth."""
        super().__init__()
        self.register_modules(
            unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
        # Spatial scale factor implied by the depth of the movq encoder.
        __lowercase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
        """Create the initial latents (or validate user-supplied ones) and
        scale them by the scheduler's initial noise sigma."""
        if latents is None:
            __lowercase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        else:
            # User-provided latents must match the expected shape exactly.
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            __lowercase : Optional[Any] = latents.to(UpperCamelCase_ )
        __lowercase : Optional[Any] = latents * scheduler.init_noise_sigma
        return latents
    def _lowerCamelCase ( self , UpperCamelCase_=0 ) -> Optional[int]:
        """Sequential CPU offload of unet and movq via accelerate's
        ``cpu_offload`` (weights live on GPU only while each module runs)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        __lowercase : Tuple = torch.device(F"""cuda:{gpu_id}""" )
        __lowercase : Any = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
    def _lowerCamelCase ( self , UpperCamelCase_=0 ) -> Any:
        """Model-level CPU offload via ``cpu_offload_with_hook`` (requires
        accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        __lowercase : str = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        __lowercase : Optional[int] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __lowercase ,__lowercase : Any = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
        # We'll offload the last model manually.
        __lowercase : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCamelCase ( self ) -> List[Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 1_00 , UpperCamelCase_ = 4.0 , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ) -> Union[str, Any]:
__lowercase : Optional[int] = self._execution_device
__lowercase : int = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Tuple = torch.cat(UpperCamelCase_ , dim=0 )
__lowercase : List[str] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Any = torch.cat(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
__lowercase : Any = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowercase : List[str] = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowercase : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowercase : Dict = self.scheduler.timesteps
__lowercase : List[Any] = self.unet.config.in_channels
__lowercase ,__lowercase : Any = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowercase : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowercase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase : List[Any] = {'''image_embeds''': image_embeds}
__lowercase : Dict = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowercase ,__lowercase : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
__lowercase ,__lowercase : Optional[int] = noise_pred.chunk(2 )
__lowercase ,__lowercase : int = variance_pred.chunk(2 )
__lowercase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase ,__lowercase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase : int = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowercase : Dict = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowercase : int = image * 0.5 + 0.5
__lowercase : int = image.clamp(0 , 1 )
__lowercase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase : Optional[Any] = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 76 |
from math import isqrt, sqrt
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , int(sqrt(__snake_case ) + 1 ) ):
if n % i == 0 and i != sqrt(__snake_case ):
total += i + n // i
elif i == sqrt(__snake_case ):
total += i
return total - n
def _A ( __snake_case :int = 1_0000 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = sum(
i
for i in range(1 , __snake_case )
if sum_of_divisors(sum_of_divisors(__snake_case ) ) == i and sum_of_divisors(__snake_case ) != i )
return total
if __name__ == "__main__":
    # Read a limit from stdin and print the Euler-21 answer.
    # NOTE(review): ``solution`` is not defined in this chunk — the function
    # above was renamed to ``_A`` by the obfuscation pass, so running this
    # script raises NameError as written.
    print(solution(int(str(input()).strip())))
| 693 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# NOTE(review): every module-level constant below was renamed to ``_A`` by the
# obfuscation pass, so each assignment clobbers the previous one (logger,
# MAPPING, TOP_LEVEL_KEYS); only the last ``_A`` binding survives at runtime.
_A = logging.get_logger(__name__)
# fairseq -> transformers parameter-name mapping for the WavLM conversion;
# '*' is replaced by the encoder layer index at load time.
_A = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# target names that are assigned directly (no trailing weight/bias suffix)
_A = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def __SCREAMING_SNAKE_CASE ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` (a fairseq tensor) into the transformers module reached by
    the dotted ``key`` path on ``hf_pointer``, after checking shapes.

    ``weight_type`` selects which attribute (weight / weight_g / weight_v /
    bias) receives the data; ``None`` assigns the tensor itself.  ``full_name``
    is only used in messages.  NOTE(review): the original signature reused one
    mangled parameter name five times (a SyntaxError) and assigned every
    result to a dead name; this is the canonical reconstruction.
    """
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( fairseq_model , hf_model ):
    """Walk the fairseq state dict and load every tensor into ``hf_model``.

    Conv feature-extractor tensors are routed to ``load_conv_layer``; all other
    tensors are matched against MAPPING, with '*' expanded to the layer index
    parsed from the fairseq name.  Unmatched tensors are collected and logged.
    NOTE(review): canonical reconstruction — the original assigned every local
    to a dead mangled name (``is_used``, ``weight_type`` etc. were never set).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # layer index sits just before the matched key in the name
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one fairseq conv-feature-extractor tensor into the HF extractor.

    The fairseq name encodes ``conv_layers.<layer_id>.<type_id>...``: type 0 is
    the conv itself, type 2 a layer norm (only layer 0 when group norm is
    used).  Anything else is recorded in ``unused_weights``.  NOTE(review):
    canonical reconstruction — the original reused one mangled parameter name
    (a SyntaxError) and never bound ``name``/``layer_id``/``type_id``.
    """
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    """Convert a fairseq WavLM checkpoint into a transformers ``WavLMModel``.

    Loads the original checkpoint, rebuilds the fairseq model, creates an HF
    config (from ``config_path`` or defaults), ports all weights across, and
    saves the result to ``pytorch_dump_folder_path``.  NOTE(review): canonical
    reconstruction — the original assigned every local to the dead name ``a_``
    and then read the original variable names (NameError at runtime).
    """
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["""cfg"""] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/config/output paths and convert.
    # NOTE(review): ``parser``/``args``/``convert_wavlm_checkpoint`` are not
    # bound under those names in this chunk (assignments were mangled to ``_A``
    # and the function above was renamed), so this block raises NameError.
    _A = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    _A = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
import argparse
import os
import re
# NOTE(review): every constant below was renamed to ``_A`` by the obfuscation
# pass, so each assignment clobbers the previous one (path, then four regexes);
# the functions below still read the original names (_re_indent, ...).
_A = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_A = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_A = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_A = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_A = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_A = re.compile(r'\[([^\]]+)\]')
def __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """Return the leading-whitespace prefix of the first line of
    ``UpperCamelCase``, or the empty string for blank input.

    NOTE(review): the original bound the regex match to a dead mangled name
    while reading ``search``, and its ``-> List[Any]`` annotation referenced an
    unimported ``typing.List`` (NameError at import time).
    """
    search = _re_indent.search(UpperCamelCase )
    return "" if search is None else search.groups()[0]
def __SCREAMING_SNAKE_CASE ( code , indent_level="" , start_prompt=None , end_prompt=None ):
    """Split ``code`` into blocks delimited by lines at exactly ``indent_level``.

    Everything before the line starting with ``start_prompt`` (if given)
    becomes the first block; everything from the line starting with
    ``end_prompt`` (if given) becomes the last.  NOTE(review): canonical
    reconstruction — the original reused one mangled parameter name four times
    (a SyntaxError) and never bound ``index``/``lines``/``blocks``.
    """
    index = 0
    lines = code.split("""\n""" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["""\n""".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
                # deeper-indented block just ended: close it on this line
                current_block.append(lines[index] )
                blocks.append("""\n""".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("""\n""".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("""\n""".join(lines[index:] ) )
    return blocks
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
def _inner(UpperCamelCase : Optional[int] ):
return key(UpperCamelCase ).lower().replace("""_""" , """""" )
return _inner
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
def noop(UpperCamelCase : Tuple ):
return x
if key is None:
a_ = noop
# Constants are all uppercase, they go first.
a_ = [obj for obj in objects if key(UpperCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
a_ = [obj for obj in objects if key(UpperCamelCase )[0].isupper() and not key(UpperCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
a_ = [obj for obj in objects if not key(UpperCamelCase )[0].isupper()]
a_ = ignore_underscore(UpperCamelCase )
return sorted(UpperCamelCase , key=UpperCamelCase ) + sorted(UpperCamelCase , key=UpperCamelCase ) + sorted(UpperCamelCase , key=UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( import_statement ):
    """Sort the names inside one ``_import_structure`` import statement.

    Handles three layouts: one name per line (> 3 lines), a single bracketed
    line of names (exactly 3 lines), or names inline in the statement.
    NOTE(review): canonical reconstruction — the original never bound
    ``import_statement``/``lines``/``keys``/``idx`` (assignments went to ``a_``).
    """

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("""\n""" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        #     key: [
        #         "object1",
        #         "object2",
        #         ...
        #     ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        #     key: [
        #         "object1", "object2", ...
        #     ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + """, """.join([F'''"{k}"''' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement )
def __SCREAMING_SNAKE_CASE ( file , check_only=True ):
    """Sort the ``_import_structure`` blocks of an ``__init__.py``.

    Returns ``True`` when the file would change and ``check_only`` is set;
    otherwise rewrites the file in place.  NOTE(review): canonical
    reconstruction — the original reused one mangled parameter name (a
    SyntaxError) and never bound any of the locals it later read.
    """
    with open(file , """r""" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(file , """w""" ) as f:
                f.write("""\n""".join(main_blocks ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : List[Any]=True ) -> List[str]:
"""simple docstring"""
a_ = []
for root, _, files in os.walk(UpperCamelCase ):
if "__init__.py" in files:
a_ = sort_imports(os.path.join(UpperCamelCase , """__init__.py""" ) , check_only=UpperCamelCase )
if result:
a_ = [os.path.join(UpperCamelCase , """__init__.py""" )]
if len(UpperCamelCase ) > 0:
raise ValueError(F"""Would overwrite {len(UpperCamelCase )} files, run `make style`.""" )
if __name__ == "__main__":
    # CLI entry point for the import-sorting check.
    # NOTE(review): ``parser``/``args``/``sort_imports_in_all_inits`` are not
    # bound under those names in this chunk (assignments were mangled to ``_A``
    # and the function above was renamed), so this block raises NameError.
    _A = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    _A = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
    '''Test suite for the BLOOM fast tokenizer (``BloomTokenizerFast``).

    NOTE(review): an obfuscation pass rewrote assignment targets to ``_A``
    while later lines read the original names (e.g. ``tokenizer`` in setUp,
    ``tokenizer_r`` in the padding test), and every method was renamed to
    ``lowerCAmelCase_`` so later defs shadow earlier ones.  Kept byte-identical.
    '''

    # TokenizerTesterMixin configuration: BLOOM ships only a fast tokenizer.
    UpperCAmelCase : Tuple = None
    UpperCAmelCase : int = BloomTokenizerFast
    UpperCAmelCase : Union[str, Any] = BloomTokenizerFast
    UpperCAmelCase : Optional[Any] = True
    UpperCAmelCase : Any = False
    UpperCAmelCase : str = '''tokenizer_file'''
    UpperCAmelCase : Tuple = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}

    def lowerCAmelCase_ ( self : Tuple ):
        # setUp: fetch the reference tokenizer and serialise it into tmpdir.
        super().setUp()
        _A = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase_ ( self : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
        # Factory: load a fast tokenizer from the serialised tmpdir copy.
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    def lowerCAmelCase_ ( self : int ):
        # Round-trip check: encode two sentences, compare ids, decode back.
        _A = self.get_rust_tokenizer()
        _A = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        _A = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        _A = tokenizer.batch_encode_plus(_UpperCAmelCase )['input_ids']
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        _A = tokenizer.batch_decode(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple=6 ):
        # Padding behaviour: encoding must succeed without a pad strategy and
        # must raise once the pad token is removed and padding='max_length'.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _A = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                _A = 'This is a simple input'
                _A = ['This is a simple input 1', 'This is a simple input 2']
                _A = ('This is a simple input', 'This is a pair')
                _A = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                _A = None # Hotfixing padding = None
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Simple input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Simple input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Pair input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Encode/decode round-trip over a multilingual XNLI sample.
        _A = self.get_rust_tokenizer()
        _A = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_UpperCAmelCase )
        _A = next(iter(_UpperCAmelCase ) )['premise'] # pick up one data
        _A = list(sample_data.values() )
        _A = list(map(tokenizer.encode , _UpperCAmelCase ) )
        _A = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    def lowerCAmelCase_ ( self : int ):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 7 |
"""simple docstring"""
def UpperCAmelCase ( _lowercase ):
    """Partition problem: return the minimum possible difference between the
    sums of the two subsets ``_lowercase`` (non-negative ints) can be split into.

    Classic subset-sum DP: ``dp[i][j]`` is True when some subset of the first
    ``i`` elements sums to ``j``; the largest reachable sum not exceeding
    ``total // 2`` fixes the answer.  NOTE(review): the original bound every
    intermediate to a dead mangled name, skipped the ``dp[0][0]`` base case,
    and used the wrong ``dp[i][j - 1]`` transition.
    """
    arr = _lowercase
    n = len(arr)
    s = sum(arr)
    # dp[i][j]: can a subset of arr[:i] sum to exactly j?
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # leave arr[i-1] out
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take it
    diff = s  # fallback; the loop below always finds at least j == 0
    # largest achievable subset sum <= s // 2 minimizes |s - 2j|
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE(review): every constant below was renamed to ``snake_case_`` by the
# obfuscation pass (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES); each assignment clobbers the
# previous one, and the tokenizer class below still reads the original names.
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case_ = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
snake_case_ = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase__ ( ) -> Dict:
    """Return a bijective map from each of the 256 byte values to a printable
    unicode character, as used by byte-level BPE.

    Printable latin-1 ranges map to themselves; the remaining bytes map to
    code points starting at 256 so no byte ends up as whitespace or a control
    character.  NOTE(review): the original appended an undefined name and
    accumulated into mangled variables that were never read.
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def lowerCamelCase__ ( snake_case_ ) -> set:
    """Return the set of adjacent symbol pairs in ``snake_case_`` (a word given
    as a sequence of symbols, e.g. a tuple of strings).

    NOTE(review): the original accumulated into mangled names that were never
    read and indexed ``word[0]`` unconditionally; an explicit guard now returns
    the empty set for empty input.
    """
    word = snake_case_
    pairs = set()
    if not word:
        return pairs
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    """Byte-level BPE tokenizer for LED (derived from the BART tokenizer).

    NOTE(review): the obfuscation pass renamed every class attribute to ``A_``
    (each assignment clobbers the previous one) and every method to ``a``; the
    base class name ``lowercase__`` is also unresolved in this chunk.
    """

    # PreTrainedTokenizer configuration hooks (vocab file names, pretrained
    # maps, positional-embedding sizes, model input names).
    A_ : Any = VOCAB_FILES_NAMES
    A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    A_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__(self : str , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any]="replace" , a__ : Optional[Any]="<s>" , a__ : int="</s>" , a__ : str="</s>" , a__ : List[str]="<s>" , a__ : Optional[int]="<unk>" , a__ : List[str]="<pad>" , a__ : Union[str, Any]="<mask>" , a__ : Tuple=False , **a__ : Optional[Any] , ):
"""simple docstring"""
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
__snake_case = json.load(__lowerCamelCase )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = errors # how to handle errors in decoding
__snake_case = bytes_to_unicode()
__snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
__snake_case = merges_handle.read().split('''\n''' )[1:-1]
__snake_case = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__snake_case = {}
__snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def a (self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def a (self : Union[str, Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a (self : Tuple , a__ : Dict ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__snake_case = tuple(__lowerCamelCase )
__snake_case = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__snake_case = min(__lowerCamelCase , key=lambda a__ : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case = bigram
__snake_case = []
__snake_case = 0
while i < len(__lowerCamelCase ):
try:
__snake_case = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case = tuple(__lowerCamelCase )
__snake_case = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__snake_case = get_pairs(__lowerCamelCase )
__snake_case = " ".join(__lowerCamelCase )
__snake_case = word
return word
def a (self : Tuple , a__ : List[Any] ):
"""simple docstring"""
__snake_case = []
for token in re.findall(self.pat , __lowerCamelCase ):
__snake_case = "".join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def a (self : Any , a__ : int ):
"""simple docstring"""
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def a (self : List[Any] , a__ : Union[str, Any] ):
"""simple docstring"""
return self.decoder.get(__lowerCamelCase )
def a (self : Union[str, Any] , a__ : Tuple ):
"""simple docstring"""
__snake_case = "".join(__lowerCamelCase )
__snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
    """Write the vocab (JSON) and BPE merges to ``save_directory``.

    Returns ``(vocab_file, merge_file)`` paths, or None (after logging an
    error) when ``save_directory`` is not a directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
    merge_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

    with open(vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

    index = 0
    with open(merge_file, 'w', encoding='utf-8') as writer:
        writer.write('#version: 0.2\n')
        # Merges must be written in rank order; a gap means the ranks dict
        # is inconsistent, which we warn about but keep writing.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                    ' Please check that the tokenizer is not corrupted!')
                index = token_index
            writer.write(' '.join(bpe_tokens) + '\n')
            index += 1
    return vocab_file, merge_file
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
    """Add special tokens: ``<cls> A <sep>`` for a single sequence and
    ``<cls> A <sep><sep> B <sep>`` for a pair."""
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
    """Return a mask with 1 at special-token positions, 0 elsewhere, for
    sequences formatted by ``build_inputs_with_special_tokens``."""
    if already_has_special_tokens:
        # Input already carries special tokens — defer to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
    """Return all-zero token-type ids (this model does not use segment ids),
    sized to match the sequence(s) after special tokens are added."""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space so the byte-level BPE treats the first
    word like any other (GPT-2 convention). Returns ``(text, kwargs)``."""
    add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
    """Pad like the base tokenizer, then extend ``global_attention_mask`` (if
    present) to the padded length using ``-1`` as the pad value."""
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names

    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` need to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs["global_attention_mask"] = (
                    encoded_inputs["global_attention_mask"] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                    "global_attention_mask"
                ]
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side))
    return encoded_inputs
| 705 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): an automated rename flattened two distinct module constants to
# the same name, so the second assignment silently overwrites the first.
# Presumably these were MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32
# (as in the upstream accelerate example) — TODO confirm before renaming,
# since other obfuscated references may still read `snake_case_`.
snake_case_ = 16
snake_case_ = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build GLUE/MRPC train and validation dataloaders.

    Tokenizes with bert-base-cased and pads dynamically per batch (fixed
    length 128 on TPU, where dynamic shapes are slow).

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only: replace the real dataloader factory with a mocked one so
# CI can run this script without downloading datasets/models.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE/MRPC with Accelerate + LocalSGD.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
        args: parsed CLI namespace (cpu, mixed_precision,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
                accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    """Parse CLI flags and launch `training_function` with default HPs."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 388 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
# Create the output directories for the converted checkpoints up front.
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal-UNet checkpoint (horizon 32 or 128) to the
    diffusers UNet1D format and save weights + config under hub/.

    Args:
        hor: planning horizon of the source checkpoint; selects the block
            layout. Only 32 and 128 are supported.
    """
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65_536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys() )}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}')
    # Parameter order matches between the two models, so map old names to new
    # names positionally.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the hopper value-function checkpoint to the diffusers UNet1D
    format and save weights + config under hub/."""
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65_536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model.state_dict()
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys() )}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}')

    # Map old parameter names to new ones positionally (orders match).
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
# Convert the horizon-32 UNet and the value function; the horizon-128
# conversion is left commented out.
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 218 |
from collections.abc import Callable
class lowerCamelCase__:
    """Array-backed max-heap ordered by ``key(item_value)``.

    Each entry in ``arr`` is a ``[item, score]`` pair; ``pos_map`` tracks the
    index of every item so update/delete by item are O(log n). The item with
    the LARGEST score sits at the root (pass ``key=lambda x: -x`` for
    min-heap behaviour).
    """

    def __init__(self, key=None) -> None:
        """``key``: scoring function applied to item values (identity by default)."""
        # Stores heap entries as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int):
        """Return the parent index of node ``i`` (None for the root)."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        """Return the left-child index of node ``i`` if it is inside the heap."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        """Return the right-child index of node ``i`` if it is inside the heap."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swap entries ``i`` and ``j``, keeping ``pos_map`` consistent."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """True when entry ``i`` scores strictly lower than entry ``j``."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Return the index among ``i`` and its children that should be the
        parent (the highest-scoring one); ``i`` itself if already valid."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fix the heap property from ``index`` upward toward the root."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fix the heap property from ``index`` downward toward the leaves."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Re-score ``item`` with ``item_value`` (no-op if absent)."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Remove ``item`` from the heap (no-op if absent)."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last entry into the vacated slot, then shrink.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Insert ``item`` scored by ``key(item_value)``."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            # Reuse a previously vacated slot instead of growing the list.
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Return the top ``[item, score]`` pair without removing it (None if empty)."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Remove and return the top ``[item, score]`` pair (None if empty)."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
# NOTE(review): this function has an empty body (docstring only) — it looks
# like a refactoring artifact rather than intentional API; confirm before
# removing.
def lowerCAmelCase__ ( ):
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 647 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by Monte Carlo sampling of the unit circle.

    Draws ``iterations`` points uniformly in [-1, 1]^2; the fraction landing
    inside the unit circle approximates pi/4. Prints the estimate, the
    reference value of pi, and the absolute error.
    """
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate )}')
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0):
    """Monte Carlo estimate of the integral of ``function_to_integrate`` over
    [min_value, max_value]: mean of f at uniform samples times the width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Sanity-check the estimator on y = x, whose exact integral over
    [min_value, max_value] is (max^2 - min^2) / 2; prints both values."""
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value )}')
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int):
    """Estimate pi as the area under the quarter-circle y = sqrt(4 - x^2)
    on [0, 2] (exactly pi for radius 2); prints estimate and error."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi )}')
    print("******************")
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 721 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowerCamelCase(unittest.TestCase):
    """Smoke tests for TensorFlowBenchmark: each test runs a tiny model
    through inference/training benchmarking and checks results are populated.

    NOTE(review): method names and the True/False argument values were
    destroyed by an automated rename (every method was called `lowercase_`,
    so unittest discovered none of them, and booleans were replaced by an
    undefined placeholder). They are restored here from the upstream
    transformers benchmark tests — confirm against that file.
    """

    def check_results_dict_not_empty(self, results):
        # Every (batch size, sequence length) combination must have a result.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 608 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 1_00_01) -> int:
    """Return the ``nth`` prime number (Project Euler 7).

    Raises:
        TypeError: if ``nth`` is not castable to int.
        ValueError: if ``nth`` < 1.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
# Print the default (10001st prime) answer; the `=` f-string form needs 3.8+.
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 13 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00) -> int:
    """Count perimeters <= ``limit`` expressible as an integer right triangle
    in exactly one way (Project Euler 75).

    Uses Euclid's formula: for coprime m > n of opposite parity, the
    primitive triple (m^2 - n^2, 2mn, m^2 + n^2) has perimeter
    2m(m + n); every multiple of a primitive perimeter is counted.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n < m, opposite parity to m, coprime with m -> primitive triples.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
# Print the answer for the default limit of 1,500,000.
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 13 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def _get_uniform_logits(self, batch_size: int, length: int):
    """Return a (batch_size, length) array with every entry 1/length —
    a uniform score distribution used as a test fixture."""
    scores = jnp.ones((batch_size, length)) / length
    return scores
def A ( self : str ) -> List[Any]:
UpperCAmelCase : Dict = None
UpperCAmelCase : int = 20
UpperCAmelCase : Dict = self._get_uniform_logits(batch_size=2 , length=__snake_case )
# tweak scores to not be uniform anymore
UpperCAmelCase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase : Optional[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase : Optional[Any] = jax.nn.softmax(__snake_case , axis=-1 )
UpperCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase : str = jax.nn.softmax(temp_dist_warper_sharper(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
UpperCAmelCase : Dict = jax.nn.softmax(temp_dist_warper_smoother(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def A ( self : Union[str, Any] ) -> Any:
UpperCAmelCase : Tuple = None
UpperCAmelCase : Union[str, Any] = 10
UpperCAmelCase : Tuple = 2
# create ramp distribution
UpperCAmelCase : Any = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase : Optional[int] = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase : List[str] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : int = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase : List[str] = 5
UpperCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase : Dict = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase : List[Any] = top_k_warp_safety_check(__snake_case , __snake_case , cur_len=__snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def A ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Optional[int] = 10
UpperCAmelCase : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase : Dict = np.exp(top_p_warp(__snake_case , __snake_case , cur_len=__snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase : Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase : Union[str, Any] = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase : List[str] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase : Optional[int] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def A ( self : str ) -> str:
UpperCAmelCase : Dict = 20
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
# check that min length is applied at length 5
UpperCAmelCase : List[str] = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase : int = 5
UpperCAmelCase : List[Any] = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase : Any = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : Any = 15
UpperCAmelCase : Optional[int] = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def A ( self : Optional[int] ) -> Dict:
UpperCAmelCase : Optional[int] = 20
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : int = 0
UpperCAmelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Dict = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase : str = 3
UpperCAmelCase : str = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : Union[str, Any] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase : Dict = 20
UpperCAmelCase : Optional[int] = 4
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Optional[Any] = 5
UpperCAmelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Tuple = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : Dict = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : Any = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def A ( self : Tuple ) -> int:
UpperCAmelCase : Any = 4
UpperCAmelCase : List[str] = 10
UpperCAmelCase : Tuple = 15
UpperCAmelCase : Any = 2
UpperCAmelCase : Any = 1
UpperCAmelCase : int = 15
# dummy input_ids and scores
UpperCAmelCase : List[Any] = ids_tensor((batch_size, sequence_length) , __snake_case )
UpperCAmelCase : Optional[int] = input_ids.copy()
UpperCAmelCase : Tuple = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : List[Any] = scores.copy()
# instantiate all dist processors
UpperCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
UpperCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase : Dict = 10
# no processor list
UpperCAmelCase : Optional[Any] = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Dict = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Union[str, Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Dict = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : str = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Union[str, Any] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
# with processor list
UpperCAmelCase : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : Any = processor(__snake_case , __snake_case , cur_len=__snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : Optional[int] = 10
UpperCAmelCase : Tuple = 15
UpperCAmelCase : int = 2
UpperCAmelCase : Any = 1
UpperCAmelCase : str = 15
# dummy input_ids and scores
UpperCAmelCase : Tuple = ids_tensor((batch_size, sequence_length) , __snake_case )
UpperCAmelCase : Tuple = input_ids.copy()
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(__snake_case , __snake_case )
UpperCAmelCase : Dict = scores.copy()
# instantiate all dist processors
UpperCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
UpperCAmelCase : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
UpperCAmelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(__snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] ):
UpperCAmelCase : Dict = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : List[Any] = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : List[str] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : List[str] = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Union[str, Any] = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
UpperCAmelCase : Optional[int] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
return scores
# with processor list
def run_processor_list(__snake_case : int , __snake_case : str , __snake_case : Dict ):
UpperCAmelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : int = processor(__snake_case , __snake_case , cur_len=__snake_case )
return scores
UpperCAmelCase : Tuple = jax.jit(__snake_case )
UpperCAmelCase : Any = jax.jit(__snake_case )
UpperCAmelCase : Optional[Any] = jitted_run_no_processor_list(__snake_case , __snake_case , __snake_case )
UpperCAmelCase : Union[str, Any] = jitted_run_processor_list(__snake_case , __snake_case , __snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 716 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE( DiffusionPipeline ):
    """Value-guided diffusion sampling pipeline for offline RL planning.

    Samples trajectories with a diffusion ``unet``, nudging each denoising
    step along the gradient of a learned ``value_function`` so that
    high-value plans are preferred, then returns the first action of the
    best-ranked trajectory.

    Fixes over the original block: the base class was the undefined name
    ``A__``; every method was named ``A`` (so later definitions overrode
    earlier ones and calls such as ``self.normalize`` / ``self.reset_xa``
    resolved to nothing); ``__init__`` declared duplicate parameter names (a
    SyntaxError); and assignments bound throwaway locals instead of
    attributes/loop variables.
    """

    def __init__( self , value_function : UNetaDModel , unet : UNetaDModel , scheduler : DDPMScheduler , env , ) -> None:
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalization statistics; non-numeric dataset entries are
        # skipped (best effort, hence the broad except).
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize( self , x_in , key ):
        """Standardize ``x_in`` with the dataset statistics for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        """Invert :meth:`normalize` for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        """Recursively move ``x_in`` (tensor, dict or array-like) onto the
        unet's device as torch tensors."""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_xa( self , x_in , cond , act_dim ):
        """Overwrite the state portion of conditioned timesteps so sampled
        trajectories stay anchored to the known states in ``cond``."""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """Denoise ``x`` with the unet, applying ``n_guide_steps`` value-gradient
        updates (scaled by ``scale``) at every scheduler timestep.

        Returns the final trajectories and the last value estimates ``y``.
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                # stop guiding on the last, nearly-noiseless steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y

    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        """Plan from observation ``obs`` and return the first action of the
        highest-value sampled trajectory (de-normalized numpy array)."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(xa , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 528 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# Fixed: the original bound every constant below to the single name
# ``_UpperCamelCase``, so each assignment clobbered the previous one while the
# rest of the module read the names restored here.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Size variants released for Funnel Transformer.
_model_names = [
    '''small''',
    '''small-base''',
    '''medium''',
    '''medium-base''',
    '''intermediate''',
    '''intermediate-base''',
    '''large''',
    '''large-base''',
    '''xlarge''',
    '''xlarge-base''',
]

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
        '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
        '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
        '''funnel-transformer/small-base''': (
            '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
        '''funnel-transformer/large-base''': (
            '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class _lowerCamelCase ( PreTrainedTokenizerFast ):
    r"""Fast (tokenizers-backed) tokenizer for Funnel Transformer.

    Fixes over the original block: the base class was the undefined name
    ``a``; all six class attributes shared the single name ``UpperCAmelCase_``
    (each overwriting the last); every ``__init__`` parameter was declared
    with the same identifier (a SyntaxError); and the overriding methods had
    lost the base-class names they must override.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses token type id 2 for the [CLS] token.
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the
        # options requested here (mirrors the BERT fast tokenizer).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """Add special tokens: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """Return token type ids: ``cls_token_type_id`` for [CLS], 0 for the
        first segment (incl. its [SEP]), 1 for the optional second segment."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Save the vocabulary files to ``save_directory`` and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 243 |
def lowerCAmelCase__( lowercase : list ) -> list:
    """Sort ``lowercase`` in place with odd-even transposition sort and return it.

    The algorithm alternates passes over even-indexed pairs (0,1),(2,3),...
    and odd-indexed pairs (1,2),(3,4),...; n passes over a list of length n
    guarantee a sorted result.

    Fixes over the original block: the comparison/swap read the undefined
    names ``arr``/``arr_size``, and the "swap" assigned both elements to
    throwaway locals, leaving the list unchanged.
    """
    length = len(lowercase )
    for pass_number in range(length ):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(pass_number % 2 , length - 1 , 2 ):
            if lowercase[i + 1] < lowercase[i]:
                lowercase[i], lowercase[i + 1] = lowercase[i + 1], lowercase[i]
    return lowercase
if __name__ == "__main__":
    _UpperCamelCase = list(range(10, 0, -1))
    # Fixed NameError: the original referenced the undefined names ``arr`` and
    # ``odd_even_transposition`` instead of the bindings actually defined here.
    print(F'''Original: {_UpperCamelCase}. Sorted: {lowerCAmelCase__(_UpperCamelCase)}''')
| 243 | 1 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Record type for the three scraped counters.  Fixed: the namedtuple was
# previously bound to ``_lowercase`` while the function below read the
# undefined name ``covid_data``.
covid_data = namedtuple('''covid_data''', '''cases deaths recovered''')


def _A (UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ) ->covid_data:
    '''Scrape worldometers and return total cases, deaths and recoveries.'''
    # The three "maincounter" divs hold the worldwide totals, in page order.
    lowerCamelCase__ : str = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(UpperCamelCase ).content ).xpath(lowerCamelCase__ ) )


_lowercase = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
if __name__ == "__main__":
    # Fixed NameError (``fmt``/``covid_stats`` were undefined); guarded so
    # importing the module no longer triggers a network request.
    print(_lowercase.format(*_A()))
| 711 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _A (tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ) ->None:
    '''Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Fixes over the original block: all three parameters were declared with
    the same name (a SyntaxError) and the f-strings read the undefined names
    ``config`` and ``pytorch_dump_path``.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON file describing the T5 architecture.
        pytorch_dump_path: directory to write the converted PyTorch model to.
    '''
    config = TaConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # Fixed NameError: the parser was bound to ``_lowercase`` but used as
    # ``parser``, and the final call targeted the undefined name
    # ``convert_tf_checkpoint_to_pytorch`` instead of ``_A``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    _A(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 96 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fixed: the original bound both the logger and the archive map to the single
# name ``lowercase``, so the second assignment clobbered the logger.
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
    """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
    """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
    """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
    """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class __lowercase ( PretrainedConfig ):
    '''Configuration class for RWKV models; defaults mirror RWKV/rwkv-4-169m-pile.

    Fixes over the original block: the base class was the undefined name
    ``A``; the two class attributes shared one name (so ``model_type`` was
    lost); and every ``__init__`` parameter was declared as ``_a`` (a
    SyntaxError from duplicate argument names).
    '''

    model_type = '''rwkv'''
    # RWKV calls its maximum sequence length ``context_length``; expose it
    # under the standard HF name too.
    attribute_map = {'''max_position_embeddings''': '''context_length'''}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention and FFN widths default relative to hidden_size when unset.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 240 | import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowercase ( A, unittest.TestCase ):
'''simple docstring'''
_A : Dict = RoCBertTokenizer
_A : Optional[Any] = None
_A : Dict = False
_A : List[str] = True
_A : Optional[int] = filter_non_english
def A_ ( self : int ):
super().setUp()
UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for i, value in enumerate(_a ):
UpperCamelCase__ = i
UpperCamelCase__ = i
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_a , _a , ensure_ascii=_a )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_a , _a , ensure_ascii=_a )
def A_ ( self : List[str] ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase__ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_a , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_a ) , [5, 6, 2, 5, 7, 8] )
def A_ ( self : Tuple ):
UpperCamelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A_ ( self : List[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : Any ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : str ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : List[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : Any ):
# Accent stripping without lower-casing: 'HäLLo' -> 'HaLLo'.
# NOTE(review): `_a` placeholders (do_lower_case=False, strip_accents=True expected) — confirm.
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : Optional[int] ):
# Tokens listed in never_split (here '[UNK]') must come through as a single token.
# NOTE(review): `_a` is an obfuscation placeholder (presumably False for do_lower_case) — confirm.
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A_ ( self : Optional[int] ):
# WordPiece: greedy longest-match-first against a tiny vocab; unknown pieces map to [UNK];
# the empty string tokenizes to an empty list.
# NOTE(review): assignment targets are obfuscated (vocab dict / token->index fill / tokenizer) — confirm variable wiring upstream.
UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCamelCase__ = {}
for i, token in enumerate(_a ):
UpperCamelCase__ = i
UpperCamelCase__ = RoCBertWordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A_ ( self : Union[str, Any] ):
# _is_whitespace covers space, tab, CR, LF and non-breaking space (\u00A0), but not letters/punctuation.
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A_ ( self : Optional[Any] ):
# _is_control: true for control chars (\u0005), false for letters and layout whitespace.
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A_ ( self : List[str] ):
# _is_punctuation: ASCII symbols count as punctuation; letters and spaces do not.
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A_ ( self : Union[str, Any] ):
# A lone soft hyphen (\xad) must be cleaned away, yielding an empty token list.
# NOTE(review): `_a` inside the comprehension stands for the loop variable `t` — obfuscation artifact, confirm upstream.
UpperCamelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
UpperCamelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def A_ ( self : Tuple ):
# Offset mapping from the fast tokenizer: with lower-casing the accented word is
# normalized to a single piece ('naive'); without it, it splits into sub-pieces
# whose offsets must still index into the ORIGINAL string.
# NOTE(review): `_a` arguments are obfuscated booleans/identifiers; `tokens` is the
# (obfuscated) result of encode_plus — confirm wiring against upstream.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCamelCase__ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
UpperCamelCase__ = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
UpperCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A_ ( self : Any ):
# Chinese-character handling parity between slow and fast tokenizers:
# with tokenize_chinese_chars enabled no character gets a '##' prefix; with it
# disabled only the first character of a run is prefix-free.
# NOTE(review): `_a` arguments are obfuscated (pretrained name, kwargs, True/False flags) — confirm upstream.
UpperCamelCase__ = ['''的''', '''人''', '''有''']
UpperCamelCase__ = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = True
UpperCamelCase__ = self.tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = tokenizer_p.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(_a )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
UpperCamelCase__ = False
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = tokenizer_r.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_p.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(_a )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase__ = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
@slow
def A_ ( self : str ):
# build_inputs_with_special_tokens wraps sequences as [CLS] A [SEP] (+ B [SEP] for pairs);
# per the assertions, CLS id is 1 and SEP id is 2 in this vocab.
# NOTE(review): `text`/`text_a` read below are the obfuscated encode results — confirm upstream.
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase__ = tokenizer.encode('''你好''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode('''你是谁''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A_ ( self : List[Any] ):
# prepare_for_model fed with (ids, shape_ids, pronunciation_ids) must produce the same
# encoding as a direct encode_plus call on the raw text.
# NOTE(review): `_a` arguments are obfuscated identifiers/booleans — confirm upstream.
UpperCamelCase__ = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase__ = '''你好,你是谁'''
UpperCamelCase__ = tokenizer.tokenize(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_shape_ids(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(_a )
UpperCamelCase__ = tokenizer.prepare_for_model(
_a , _a , _a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode_plus(_a , add_special_tokens=_a )
self.assertEqual(_a , _a )
| 240 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE sampler.

    Runs the predictor-corrector loop of Song et al. (2021) with a UNet score
    network and a ``ScoreSdeVeScheduler``.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__A`` (``DiffusionPipeline`` is imported but was unused), ``__init__``
    and ``__call__`` declared duplicate parameter names (a SyntaxError), and
    locals were assigned under one name but read under another (NameError).
    """

    # Attributes installed by register_modules() below.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler) -> Optional[int]:
        """Store the score network and its matching SDE-VE scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=2000,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample ``batch_size`` RGB images.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict=False``); images are PIL when ``output_type == "pil"``,
        otherwise a numpy array in NHWC layout.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin-dynamics corrector, repeated correct_steps times)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-SDE predictor)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the generated image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 163 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs/inputs and runs shape checks on Flax ViT models.

    Fixes vs. the previous revision: the class was named ``SCREAMING_SNAKE_CASE``
    although the test suite instantiates ``FlaxViTModelTester``; ``__init__``
    declared every parameter as ``lowerCamelCase__`` (duplicate names are a
    SyntaxError); all four methods shared one name, shadowing each other.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a small ViTConfig plus random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Check the base model's last_hidden_state shape."""
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Check the classification head's logits shape (RGB and greyscale)."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test (config, inputs_dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE(FlaxModelTesterMixin, unittest.TestCase):
    """Common + ViT-specific tests for the Flax ViT models.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__A`` (``FlaxModelTesterMixin`` is imported but was unused); the class
    attribute was declared ``snake_case_`` although the bodies read
    ``self.all_model_classes``; every method shared the name ``snake_case__``,
    so only the last survived and unittest could discover none of them.
    """

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT must not change the number or shapes of outputs.
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 163 | 1 |
"""simple docstring"""
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that follows each undirected edge exactly once.

    Args:
        u: node to start from.
        graph: adjacency mapping ``node -> list of neighbours``.
        visited_edge: 2-D boolean matrix; ``visited_edge[a][b]`` marks edge (a, b) used.
        path: nodes visited so far (``None`` starts a fresh path).

    Returns:
        The list of nodes in visit order (an Euler path/cycle when the caller
        has verified the graph admits one).

    Fixes vs. the previous revision: all four parameters were declared with the
    same name (a SyntaxError) while the body read ``u``/``graph``/``path``, and
    the recursive call passed the undefined name ``A_``.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions: the graph is undirected.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree vertex count.

    Returns ``(status, odd_node)`` where status is 1 for an Euler cycle
    (no odd-degree vertices), 2 for an Euler path (exactly two; ``odd_node``
    is a valid start vertex), and 3 for neither. ``odd_node`` is -1 when no
    odd-degree vertex was seen.

    Fixes vs. the previous revision: the function was named ``UpperCAmelCase__``
    although callers invoke ``check_circuit_or_path``, and both parameters
    shared one name (a SyntaxError) while the body read ``graph``/``max_node``.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Report whether ``graph`` has an Euler cycle/path and print one traversal.

    Prints a diagnostic line plus the traversal produced by :func:`dfs`;
    prints "no path" and returns early when the graph is not Eulerian.

    Fixes vs. the previous revision: the function and its callees were invoked
    through the undefined name ``A_``, both parameters shared one name
    (a SyntaxError), and locals were assigned under an obfuscated name but
    read as ``check``/``odd_node``/``start_node``.
    """
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # With exactly two odd-degree vertices the path must start at one of them.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Run the Euler-path check on a handful of sample graphs.

    Fixes vs. the previous revision: the function was named ``UpperCAmelCase__``
    although the ``__main__`` guard calls ``main()``, and the graphs/limit were
    assigned to one repeated obfuscated name and then passed as the undefined
    ``A_``.
    """
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator factory: tag the decorated function as the handler for ``key``.

    The tag is accumulated on the function's ``handle_key`` list attribute, which
    the ``KeyHandler`` metaclass later collects into a dispatch table.

    Fixes vs. the previous revision: three functions in this module shared the
    name ``lowerCamelCase__`` (each shadowing the previous), and the body
    assigned one obfuscated name while reading ``handle``.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Decorator factory: tag the decorated function as the handler for several keys.

    Same mechanism as :func:`mark`, extending the function's ``handle_key`` list
    with every key at once.

    Fixes vs. the previous revision: the name collided with the other module
    functions (all ``lowerCamelCase__``) and the body mixed obfuscated and
    original local names.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that wires up methods tagged via ``mark``/``mark_multiple``.

    On class creation it builds a ``key_handler`` mapping from key code to
    handler function and installs a ``handle_input`` dispatcher on the class.

    Fixes vs. the previous revision: the class was named ``lowercase_`` with the
    undefined base ``_A`` even though its own body references
    ``KeyHandler.handle_input`` (and ``super().__new__(cls, name, bases, attrs)``
    requires a ``type`` subclass); ``__new__`` declared duplicate parameter
    names (a SyntaxError); locals were assigned under obfuscated names but read
    as ``new_cls``/``handled_keys``/``char``/``handler``.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        # Collect every attribute tagged with a handle_key list into the dispatch table.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one keypress and dispatch to the registered handler (None if unhandled)."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # Remember which key triggered the handler for the handler to inspect.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Re-create ``cls`` through the ``KeyHandler`` metaclass so tagged methods are wired up.

    Fixes vs. the previous revision: the function shared the obfuscated name
    ``lowerCamelCase__`` with the two decorator factories above, shadowing them.
    """
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 660 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels (matched case-insensitively) are never
# auto-staled or auto-closed by the bot below.
# Fix vs. the previous revision: the list was bound to the obfuscated name
# `_lowerCamelCase` while the bot reads `LABELS_TO_EXEMPT`.
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    """Close or nag stale issues on huggingface/accelerate.

    An open issue is closed when the last comment is the bot's own, it has been
    quiet for >7 days and is >=30 days old; otherwise a stale warning comment is
    posted after >23 quiet days. Exempt labels are skipped entirely.

    Fixes vs. the previous revision: the function was named ``__lowerCamelCase``
    although the ``__main__`` guard calls ``main()``; every local was assigned
    to the single name ``UpperCamelCase`` while the body read
    ``g``/``repo``/``comments``/``last_comment``/etc. (NameError); the sort key
    lambda took ``A__`` but read ``i``.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE(DiffusionPipeline):
    """Unconditional latent-diffusion generation pipeline (VQ-VAE + UNet + DDIM).

    Denoises random latents with the UNet/scheduler loop, then decodes them to
    images with the VQ-VAE.

    Fixes vs. the previous revision: the base class was the undefined name
    ``_a`` (``DiffusionPipeline`` is imported but was unused), ``__init__`` and
    ``__call__`` declared duplicate parameter names (a SyntaxError), and locals
    were assigned as ``UpperCamelCase`` while the body read
    ``latents``/``eta``/``image``/etc. (NameError).
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        """Store the decoder, denoiser and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 5_0,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample ``batch_size`` images; returns ImagePipelineOutput (or a tuple)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 324 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _UpperCamelCase(PipelineTool):
    """Agent tool that summarizes English text with a BART checkpoint.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__lowerCamelCase`` (``PipelineTool`` is imported but was unused); the
    class attributes did not use the names the ``PipelineTool`` protocol reads
    (``default_checkpoint``, ``description``, ``name``, ``pre_processor_class``,
    ``model_class``, ``inputs``, ``outputs``); all three methods shared the name
    ``__lowerCAmelCase`` so only the last survived; and the boolean keyword
    arguments were bound to the undefined ``SCREAMING_SNAKE_CASE__``.
    """

    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for one input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Decode generated ids back into a clean summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 47 |
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization.

    Returns True for primes, False for 0, 1, negatives and composites.

    Fixes vs. the previous revision: three functions in this chunk all shared
    the name ``lowerCamelCase`` (shadowing each other) while the callers invoke
    ``is_prime``/``prime_generator``/``solution``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield prime numbers in ascending order, starting at 2, forever.

    Fixes vs. the previous revision: the function shared the obfuscated name
    ``lowerCamelCase`` with its siblings, and the counter was assigned under an
    obfuscated name while the body read/incremented ``num`` (NameError).
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 1_0_0_0_1) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Fixes vs. the previous revision: the function shared the obfuscated name
    ``lowerCamelCase`` with its siblings (the ``__main__`` guard calls
    ``solution()``) and its parameter was named ``_UpperCamelCase`` while the
    body read ``nth``.
    """
    # islice skips the first nth-1 primes and yields exactly the nth one.
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"{solution() = }")
| 139 | 0 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number (optionally prefixed with +91, 0 or 91).

    Fixes vs. the previous revision: the function was named ``snake_case``
    although the ``__main__`` guard calls ``indian_phone_validator``, and its
    parameter was named ``a_`` while the body read ``phone`` (NameError).
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        # The pattern is ^...$-anchored, so any match spans the whole string.
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 543 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase ="pt"
elif is_tf_available():
UpperCamelCase ="tf"
else:
UpperCamelCase ="jax"
class A ( SCREAMING_SNAKE_CASE__, unittest.TestCase ):
"""simple docstring"""
__a : int = PerceiverTokenizer
__a : List[str] = False
def _UpperCAmelCase ( self ):
# setUp: create a fresh Perceiver tokenizer and persist it to the common test temp dir.
# NOTE(review): assignment target (UpperCamelCase_) vs. use-site (tokenizer) diverge — obfuscated dump, confirm upstream.
super().setUp()
UpperCamelCase_ : Optional[int] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCAmelCase ( self ):
# Cached reference tokenizer loaded from the published deepmind checkpoint.
return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
# Reload the tokenizer saved in setUp, forwarding any constructor kwargs.
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=20 , __lowerCAmelCase=5 ):
# Override of the common-test helper: build a (text, ids) pair from ids that are
# individually decodable, since raw Perceiver byte ids may not be valid UTF-8.
# NOTE(review): the repeated parameter name `__lowerCAmelCase` and the
# assigned-vs-read local names (toks, tok, toks_ids, output_txt) are obfuscation
# artifacts — confirm the original wiring upstream.
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
UpperCamelCase_ : str = []
for i in range(len(__lowerCAmelCase ) ):
try:
UpperCamelCase_ : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCamelCase_ : Optional[Any] = list(filter(lambda __lowerCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __lowerCAmelCase ) )
UpperCamelCase_ : int = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
UpperCamelCase_ : List[Any] = toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
UpperCamelCase_ : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase_ : List[Any] = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase_ : str = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
UpperCamelCase_ : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
UpperCamelCase_ : Tuple = """ """ + output_txt
UpperCamelCase_ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
def _UpperCAmelCase ( self ):
# Multibyte characters must round-trip: byte-level ids (+6 offset, CLS=4/SEP=5)
# encode/decode back to the original text wrapped in [CLS]/[SEP].
UpperCamelCase_ : Dict = self.perceiver_tokenizer
UpperCamelCase_ : Optional[int] = """Unicode €."""
UpperCamelCase_ : Dict = tokenizer(__lowerCAmelCase )
UpperCamelCase_ : Tuple = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded["""input_ids"""] , __lowerCAmelCase )
# decoding
UpperCamelCase_ : str = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , """[CLS]Unicode €.[SEP]""" )
UpperCamelCase_ : str = tokenizer("""e è é ê ë""" )
UpperCamelCase_ : List[str] = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded["""input_ids"""] , __lowerCAmelCase )
# decoding
UpperCamelCase_ : List[str] = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def _UpperCAmelCase ( self ):
# Batch encoding: padding yields rectangular (2, 38) input_ids/attention_mask and
# the first row matches the expected byte-id sequence.
# NOTE(review): `__lowerCAmelCase` placeholders stand for padding/tensor-framework
# arguments — confirm upstream.
UpperCamelCase_ : Union[str, Any] = self.perceiver_tokenizer
UpperCamelCase_ : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
UpperCamelCase_ : int = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
UpperCamelCase_ : Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
if FRAMEWORK != "jax":
UpperCamelCase_ : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
UpperCamelCase_ : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def _UpperCAmelCase ( self ):
# Encoder-only tokenizer output: input_ids/attention_mask present, and no
# decoder_input_ids/decoder_attention_mask keys.
UpperCamelCase_ : Optional[int] = self.perceiver_tokenizer
UpperCamelCase_ : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_ : int = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __lowerCAmelCase )
self.assertIn("""attention_mask""" , __lowerCAmelCase )
self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
# text_target encoding with max_length padding/truncation must yield width-32 rows.
UpperCamelCase_ : int = self.perceiver_tokenizer
UpperCamelCase_ : List[str] = [
"""Summary of the text.""",
"""Another summary.""",
]
UpperCamelCase_ : Dict = tokenizer(
text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _UpperCAmelCase ( self ):
# Save/load round-trip: encodings, added tokens, additional special tokens and
# model_max_length (both persisted and from_pretrained-overridden) must survive.
# NOTE(review): the repeated `UpperCamelCase_` targets stand for distinct locals
# (tokenizers, tmpdirname, sample_text, ids, after_tokenizer, ...) — confirm upstream.
# safety check on max_len default value so we are sure the test works
UpperCamelCase_ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase_ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase_ : Tuple = tempfile.mkdtemp()
UpperCamelCase_ : Optional[int] = """ He is very happy, UNwant\u00E9d,running"""
UpperCamelCase_ : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
UpperCamelCase_ : Tuple = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase_ : Tuple = tempfile.mkdtemp()
UpperCamelCase_ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
UpperCamelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
UpperCamelCase_ : Union[str, Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
UpperCamelCase_ : Tuple = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : List[Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase_ : int = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCAmelCase )
def _UpperCAmelCase ( self ):
    """Check that `additional_special_tokens` declared in the saved JSON config files
    are honored by `from_pretrained`, and can be overridden via the kwarg.

    The mangled original bound every value to `UpperCamelCase_` while later statements
    read the real names (`tokenizer_list`, `tmp_dir`, ...); those bindings are restored here.
    """
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                special_tokens_map = json.load(json_file)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                tokenizer_config = json.load(json_file)
            added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
            # Declare the extra ids plus one additional special token in both config files.
            special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                json.dump(tokenizer_config, outfile)
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
            self.assertIn(
                "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
            self.assertEqual(
                ["an_additional_special_token"],
                tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                ),
            )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
            tokenizer = tokenizer_class.from_pretrained(
                tmp_dir, additional_special_tokens=new_added_tokens)
            self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
            self.assertEqual(
                ["a_new_additional_special_token"],
                tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                ),
            )
def _UpperCAmelCase ( self ):
    """Decoding id 178 (an out-of-range byte for Perceiver) must yield the Unicode
    replacement character. The original bound the tokenizer to a mangled name
    (`UpperCamelCase_`) while reading `tokenizer` on the next line."""
    tokenizer = self.perceiver_tokenizer
    self.assertEqual(tokenizer.decode([178]), "�")
def _UpperCAmelCase ( self ):
    # No-op override: disables one of the shared tokenizer tests — presumably not
    # applicable to this tokenizer's single-character/byte-level vocabulary.
    # TODO(review): confirm which base test this overrides (names were mangled).
    pass
def _UpperCAmelCase ( self ):
    # No-op override: disables one of the shared tokenizer tests — presumably not
    # applicable to this tokenizer's single-character/byte-level vocabulary.
    # TODO(review): confirm which base test this overrides (names were mangled).
    pass
def _UpperCAmelCase ( self ):
    # No-op override: disables one of the shared tokenizer tests — presumably not
    # applicable to this tokenizer's single-character/byte-level vocabulary.
    # TODO(review): confirm which base test this overrides (names were mangled).
    pass
def _UpperCAmelCase ( self ):
    # No-op override: disables one of the shared tokenizer tests — presumably not
    # applicable to this tokenizer's single-character/byte-level vocabulary.
    # TODO(review): confirm which base test this overrides (names were mangled).
    pass
def _UpperCAmelCase ( self ):
    """`convert_tokens_to_string` must return a plain `str` for character-level tokens."""
    # The default common tokenizer tests use invalid tokens for Perceiver, which can only
    # accept one-character strings and special added tokens as tokens.
    # Restored bindings: the mangled original never bound `tokenizers`/`tokens`/`string`.
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
| 543 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Number of synthetic examples generated for the speed test.
SPEED_TEST_N_EXAMPLES = 500000

# Results are written next to this script, under results/<script name>.json.
# (The mangled original bound all three values to `__lowerCamelCase` while the
# following lines and the benchmark body read these names.)
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __UpperCAmelCase(dataset, **kwargs):
    """Run `Dataset.map(**kwargs)`; `get_duration` turns the call into a timing.

    Fixes a SyntaxError in the mangled original: both parameters were named
    `__magic_name__`. The bogus `Optional[Any]` return annotation (typing is not
    imported in this file) is dropped as well.
    """
    # NOTE(review): upstream names this helper `map`; here it is shadowed by the
    # identically-named filter helper below — confirm the intended helper names.
    _ = dataset.map(**kwargs)
@get_duration
def __UpperCAmelCase(dataset, **kwargs):
    """Run `Dataset.filter(**kwargs)`; `get_duration` turns the call into a timing.

    Fixes a SyntaxError in the mangled original: both parameters were named
    `__magic_name__`. The bogus return annotation is dropped (typing is not imported).
    """
    # NOTE(review): upstream names this helper `filter`; the mangled name collides
    # with the map helper above — confirm the intended helper names.
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    """Benchmark `Dataset.map` / `Dataset.filter` over several output formats and
    write the timings as JSON to RESULTS_FILE_PATH.

    Renamed from the mangled `__UpperCAmelCase` to match the call in the
    `__main__` guard below; the mangled original also discarded every timing
    instead of storing it in `times`.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            # Tokenize the text column; used for the fast-tokenizer timing below.
            return tokenizer(examples["text"])

        # NOTE(review): `map` and `filter` below are meant to be the @get_duration
        # helpers defined above (their defs were name-mangled to `__UpperCAmelCase`);
        # each call returns the elapsed time of the corresponding Dataset method.
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds a tiny random LayoutLMv3 config plus matching inputs and checks each model head.

    Renamed from the mangled `A_`: the test class below instantiates
    `LayoutLMvaModelTester(self)`, and the mangled original assigned every
    constructor argument to a throwaway local instead of `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Create a random config and matching input tensors for every head test."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (coordinate pairs must be ordered: x1 <= x2, y1 <= y2)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the base model with text+image, text-only and image-only inputs."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the sequence-classification head returns (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the token-classification head returns (batch, text_seq_length, num_labels) logits."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the QA head returns start/end logits of shape (batch, seq_length)."""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for LayoutLMv3 (driven by LayoutLMvaModelTester).

    Renamed from the mangled `A_` (which collided with the tester class above);
    the bases were mangled to an undefined `a_` — restored from the mixins
    imported at the top of the file.
    """

    # NOTE(review): these three flag names were mangled to `a__ = False` (all the
    # same name, so only one survived) — restored from the upstream test file; confirm.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip all pipeline tests for this model (they require a processor setup
        # that the common pipeline tests do not provide).
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Expand inputs for multiple-choice heads and add dummy labels per head type."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element returned by prepare_config_and_inputs
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below.

    Renamed from the mangled `__UpperCAmelCase`: the integration test calls
    `prepare_img()`, and the original bound the image to a throwaway name
    while returning the unbound `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Integration test running the base LayoutLMv3 checkpoint on the fixture image.

    Renamed from the mangled `A_` (third class of that name in this file); all
    local bindings were mangled while later lines read the real names.
    """

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own token ids and boxes below.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 653 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a_ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', 
'''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowercase(PreTrainedTokenizerFast):
    """Fast (HF tokenizers-backed) NLLB tokenizer.

    Reconstructed from a mangled original: the base class was an undefined
    `_UpperCAmelCase` (restored from the `PreTrainedTokenizerFast` import above),
    every method was named `__UpperCamelCase` (so later defs silently overwrote
    earlier ones), and `self.*` state was never actually assigned.
    NOTE(review): module-level constants (VOCAB_FILES_NAMES, FAIRSEQ_LANGUAGE_CODES,
    logger, ...) are referenced by their conventional names — the top-of-file
    bindings were mangled to `a_` and should be restored to match.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. "eng_Latn")."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap a sequence (or pair) with the current language prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """NLLB does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipelines: encode `raw_inputs` and set the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix tokens for the source language.

        Non-legacy: prefix=[src_lang_code], suffix=[eos].
        Legacy: prefix=[], suffix=[eos, src_lang_code].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset prefix/suffix tokens for the target language (same scheme as source)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the slow tokenizer's sentencepiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 714 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# BUG FIX: every module-level constant was bound to the single name `a_`, so
# each assignment overwrote the previous one and only the framework string
# survived.  `ORG_NAME` in particular is read by the test class below but was
# never defined.  `a_` keeps its original final binding for compatibility.
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')

# Minimal tokenizer config naming source/target languages for the fixture.
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}

# Example Marian language-control token.
zh_code = '''>>zh<<'''

# Hub organization prefix for the pretrained Marian checkpoints.
ORG_NAME = '''Helsinki-NLP/'''

# Tensor framework to request from the tokenizer, by availability.
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''

a_ = FRAMEWORK  # preserve the final binding the generated file exposed
@require_sentencepiece
class __lowercase ( TokenizerTesterMixin , unittest.TestCase):
    """Test suite for ``MarianTokenizer``.

    BUG FIXES: the original inherited from the undefined name
    ``_UpperCAmelCase`` (the import above is ``TokenizerTesterMixin``), bound
    all three mixin attributes to one name ``_A``, named every method
    ``__UpperCamelCase`` (so ``self.get_tokenizer()`` calls below could never
    resolve and unittest discovered no tests), and read the undefined name
    ``lowercase__`` throughout.  Names are restored to the mixin's contract.
    """

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a local Marian vocab + sentencepiece fixture directory."""
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        # Inline the mock config so this block does not depend on module
        # constants that the mangled file never defined.
        save_json({"""target_lang""": """fi""", """source_lang""": """en"""} , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        sample_spm = get_tests_dir("""fixtures/test_sentencepiece.model""" )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(sample_spm , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(sample_spm , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_tokenizer(self, **kwargs ):
        """Load a tokenizer from the fixture directory built in ``setUp``."""
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer ):
        """Return a (source, expected-decoded) text pair for the mixin."""
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """``</s>`` maps to id 0 and back."""
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        """Vocab preserves the insertion order and size of the fixture."""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def test_tokenizer_equivalence_en_de(self):
        """The published en-de checkpoint round-trips through save/load."""
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        batch = en_de_tokenizer(["""I am a small frog"""] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )

    def test_outputs_not_longer_than_maxlen(self):
        """Very long inputs are truncated to the 512-token model max."""
        tok = self.get_tokenizer()
        batch = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )

    def test_outputs_can_be_shorter(self):
        """Short batches are padded only to the longest member."""
        tok = self.get_tokenizer()
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def test_tokenizer_integration(self):
        """Full integration check against the published en-de checkpoint.

        The expected encoding is the original literal with the runs of the pad
        id (58100) and the attention-mask bits expressed as list repetitions;
        every row is padded to the longest sequence (102 tokens).
        """
        # fmt: off
        expected_encoding = {
            """input_ids""": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            """attention_mask""": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def test_tokenizer_integration_seperate_vocabs(self):
        """Checkpoints with separate source/target vocabs encode both sides."""
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 20_47, 2]
        expected_target_ids = [69, 12, 11, 9_40, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src_ids , src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target_ids , target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
# (removed table-extraction artifact that made the file unparsable)
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# BUG FIX: all four module-level values were bound to the single name
# `UpperCamelCase__`, so each assignment discarded the previous one and the
# names the tokenizer class actually reads (`logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`)
# were never defined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

# Maximum sequence length the positional embeddings support.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}

# Preserve the final binding of the generated name for compatibility.
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """Character-level MGP-STR tokenizer: each character of the input is a token.

    BUG FIXES: the original inherited from the undefined name ``A_`` (the
    import above is ``PreTrainedTokenizer``), bound all three class attributes
    to one name ``UpperCAmelCase_``, and named every method ``lowerCAmelCase_``
    so later definitions shadowed earlier ones; the names are restored to the
    ``PreTrainedTokenizer`` contract the base class actually calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        """Load the character vocabulary from ``vocab_file`` (a JSON mapping).

        BUG FIX: every parameter in the original signature was named
        ``UpperCamelCase`` — duplicate argument names are a SyntaxError.
        """
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size( self ):
        """Number of entries in the base vocabulary."""
        return len(self.vocab )

    def get_vocab( self ):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.vocab , **self.added_tokens_encoder )

    def _tokenize( self , text ):
        """Split ``text`` into single-character tokens."""
        char_tokens = []
        for s in text:
            # BUG FIX: the original extended with the whole input string on
            # every iteration; each character must be appended exactly once.
            char_tokens.extend(s )
        return char_tokens

    def _convert_token_to_id( self , token ):
        """Map a token to its id, falling back to the unknown-token id."""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        """Map an id back to its token (``None`` if unknown)."""
        return self.decoder.get(index )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Write the vocabulary JSON into ``save_directory``; return its path."""
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            # BUG FIX: `sort_keys`/`ensure_ascii` were wired to an unrelated
            # parameter placeholder; restore the intended serialization flags.
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        return (vocab_file,)
def UpperCamelCase__ ( principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    '''
    Calculate the fixed monthly EMI (equated monthly installment) for a loan.

    BUG FIX: all three parameters were named `UpperCAmelCase_` (duplicate
    argument names are a SyntaxError) while the body read `principal`,
    `rate_per_annum` and `years_to_repay`; the parameters are named as the
    body expects, and the isinstance check no longer compares a value against
    itself.

    >>> round(UpperCamelCase__(10000, 0.12, 1), 2)
    888.49
    >>> UpperCamelCase__(1200, 0, 1)
    100.0
    >>> UpperCamelCase__(0, 0.1, 2)
    Traceback (most recent call last):
        ...
    Exception: Principal borrowed must be > 0
    '''
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    # Robustness: an interest-free loan would otherwise divide by zero below.
    if rate_per_month == 0:
        return principal / number_of_payments
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import datasets
from .evaluate import evaluate
# BUG FIX: the citation, description and usage strings were all bound to the
# single name `__UpperCAmelCase`, while the decorator and `MetricInfo` below
# read `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`; give each string
# its intended name (`__UpperCAmelCase` keeps its original final binding).
_CITATION = '''\
@article{hendrycks2021cuad,
  title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
  author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
  journal={arXiv preprint arXiv:2103.06268},
  year={2021}
}
'''

_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''

_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair as given in the references (see below)
        - \'prediction_text\': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair (see above),
        - \'answers\': a Dict in the CUAD dataset format
            {
                \'text\': list of possible texts for the answer, as a list of strings
                \'answer_start\': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)
    \'f1\': The F-score of predicted tokens versus the gold answer
    \'aupr\': Area Under the Precision-Recall curve
    \'prec_at_80_recall\': Precision at 80% recall
    \'prec_at_90_recall\': Precision at 90% recall
Examples:
    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''

# Preserve the final binding of the generated name for compatibility.
__UpperCAmelCase = _KWARGS_DESCRIPTION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
    """``datasets.Metric`` wrapper around the official CUAD scoring script.

    BUG FIXES: both methods were named ``UpperCAmelCase`` (so the second
    shadowed the first and the ``datasets.Metric`` hooks ``_info``/``_compute``
    never existed), and ``_compute`` declared two parameters both named ``a_``
    — duplicate argument names are a SyntaxError.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string" ),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
                    },
                    "references": {
                        "id": datasets.Value("string" ),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string" ),
                                "answer_start": datasets.Value("int32" ),
                            } ),
                    },
                } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )

    def _compute(self , predictions , references ):
        """Score ``predictions`` against ``references`` with the CUAD script.

        Returns the dict produced by ``evaluate`` (EM, F1, AUPR, ...).
        """
        # Map each question id to its candidate answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild the SQuAD-style dataset layout the official script expects.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        # BUG FIX: the original passed the same placeholder for both
        # arguments; wire the rebuilt dataset and the prediction mapping.
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __UpperCAmelCase ( VideoMAEImageProcessor ):
    """Deprecated alias of ``VideoMAEImageProcessor`` kept for compatibility.

    BUG FIXES: the original inherited from the undefined name
    ``_UpperCamelCase`` (the import above is ``VideoMAEImageProcessor``),
    declared both ``*a_`` and ``**a_`` (duplicate argument names are a
    SyntaxError), and passed that placeholder as the warning category.
    """

    def __init__( self , *args , **kwargs ) -> None:
        """Emit a deprecation warning, then defer to the image processor."""
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# BUG FIX: the original bound every intermediate value to the single name
# `__A`, so the import-structure dict was repeatedly overwritten by plain
# lists, while the final `_LazyModule(...)` call referenced
# `_import_structure`, which was never defined.  Rebuild the canonical
# lazy-import structure, guarded by optional-dependency availability.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so attribute access triggers the
    # real imports on demand; `__A` keeps the original binding.
    __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = __A
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
# BUG FIX: every constant in this module was bound to the single name `A__`,
# so each assignment overwrote the previous one while the parametrize lists
# below reference `README_*`, `EXPECTED_ERROR_*`, `CORRECT_DICT*` and
# `example_yaml_structure`.  Restore the intended names; `A__` keeps its
# original final binding.
example_yaml_structure = yaml.safe_load(
    '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)

# Expected parse tree for a fully valid README.
CORRECT_DICT = {
    'name': 'root',
    'text': '',
    'is_empty_text': True,
    'subsections': [
        {
            'name': 'Dataset Card for My Dataset',
            'text': '',
            'is_empty_text': True,
            'subsections': [
                {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
                {
                    'name': 'Dataset Description',
                    'text': 'Some text here.',
                    'is_empty_text': False,
                    'subsections': [
                        {
                            'name': 'Dataset Summary',
                            'text': 'Some text here.',
                            'is_empty_text': False,
                            'subsections': [],
                        },
                        {
                            'name': 'Supported Tasks and Leaderboards',
                            'text': '',
                            'is_empty_text': True,
                            'subsections': [],
                        },
                        {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
                    ],
                },
            ],
        }
    ],
}

README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

# Same tree as CORRECT_DICT, with the extra fourth-level (ignored) subsection.
CORRECT_DICT_FOUR_LEVEL = {
    'name': 'root',
    'text': '',
    'is_empty_text': True,
    'subsections': [
        {
            'name': 'Dataset Card for My Dataset',
            'text': '',
            'is_empty_text': True,
            'subsections': [
                {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
                {
                    'name': 'Dataset Description',
                    'text': 'Some text here.',
                    'is_empty_text': False,
                    'subsections': [
                        {
                            'name': 'Dataset Summary',
                            'text': 'Some text here.',
                            'is_empty_text': False,
                            'subsections': [
                                {
                                    'name': 'Extra Ignored Subsection',
                                    'text': '',
                                    'is_empty_text': True,
                                    'subsections': [],
                                }
                            ],
                        },
                        {
                            'name': 'Supported Tasks and Leaderboards',
                            'text': '',
                            'is_empty_text': True,
                            'subsections': [],
                        },
                        {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)

README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)

README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'

README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'

README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'

README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'

README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'

README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'

README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'

README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'

README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'

README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'

# Preserve the final binding of the generated name for compatibility.
A__ = EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def _snake_case ( readme_md , expected_dict ):
    """Parsing a valid README string yields the expected structure dict.

    BUG FIX: both parameters were named `lowerCamelCase__` (a SyntaxError),
    and pytest requires argument names to match the parametrize spec; the
    second argument to `from_string` is the module-level YAML structure.
    """
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def _snake_case ( readme_md , expected_error ):
    """Validating an invalid README string raises with the documented message.

    BUG FIX: duplicate `lowerCamelCase__` parameters (SyntaxError); the
    exception class passed to `pytest.raises` was a string placeholder —
    ReadMe validation raises ValueError.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _snake_case ( readme_md , expected_error ):
    """Structural parsing errors are raised directly by ``from_string``.

    BUG FIX: duplicate `lowerCamelCase__` parameters (SyntaxError); the
    exception class and the structure argument were placeholders.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _snake_case ( readme_md ):
    """With ``suppress_parsing_errors=True`` no exception escapes.

    BUG FIX: the original passed its (duplicate-named) string parameter as
    both the structure and the boolean flag.
    """
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def _snake_case ( readme_md , expected_dict ):
    """Parsing a README from disk yields the expected structure.

    BUG FIX: duplicate `lowerCamelCase__` parameters (SyntaxError); the
    asserts below read `out`, which was never bound in the original body.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def _snake_case ( readme_md , expected_error ):
    """Validating an invalid on-disk README raises with the file's path.

    BUG FIX: duplicate `lowerCamelCase__` parameters (SyntaxError) and
    placeholder arguments; the error message is formatted with the README
    path, and validation raises ValueError.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error_msg = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error_msg ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _snake_case ( readme_md , expected_error ):
    """Structural parsing errors are raised directly by ``from_readme``.

    BUG FIX: duplicate `lowerCamelCase__` parameters (SyntaxError) and
    placeholder arguments replaced with the real values.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error_msg = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error_msg ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _snake_case ( readme_md ):
    """With ``suppress_parsing_errors=True`` no exception escapes from disk.

    BUG FIX: the original passed its string parameter as the structure and
    the boolean flag; pass the YAML structure and ``True`` explicitly.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
# (removed table-extraction artifact that made the file unparsable)
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase_ :
    """A minimal real-valued vector: +, -, scalar product, dot product, norm.

    BUG FIXES: ``__init__`` read the undefined name ``components`` and stored
    results in throwaway locals, so ``self.__components`` — read by every
    other method — was never set; five different methods were all named
    ``__snake_case`` (only the last survived) even though ``__add__`` /
    ``__sub__`` / ``__mul__`` already call ``other.component(...)``; the
    return expressions built ``Vector(...)``, a name that does not exist in
    this module.  Names are restored accordingly.
    """

    def __init__( self, components: Collection[float] | None = None ):
        """Initialise from an iterable of components (empty vector if None)."""
        if components is None:
            components = []
        self.__components = list(components )

    def __len__( self ):
        """Return the dimension of the vector."""
        return len(self.__components )

    def __str__( self ):
        """Render the vector as ``(c1,c2,...)``."""
        # BUG FIX: `map` was called with an undefined first argument; each
        # component must be stringified with `str`.
        return "(" + ",".join(map(str, self.__components ) ) + ")"

    def __add__( self, other: Vector ):
        """Component-wise addition; both vectors must have the same size."""
        size = len(self )
        if size == len(other ):
            added = [self.__components[i] + other.component(i ) for i in range(size )]
            return lowerCAmelCase_(added )
        else:
            raise Exception('''must have the same size''' )

    def __sub__( self, other: Vector ):
        """Component-wise subtraction; both vectors must have the same size."""
        size = len(self )
        if size == len(other ):
            subtracted = [self.__components[i] - other.component(i ) for i in range(size )]
            return lowerCAmelCase_(subtracted )
        else:  # error case
            raise Exception('''must have the same size''' )

    @overload
    def __mul__( self, other: float ):
        ...

    @overload
    def __mul__( self, other: Vector ):
        ...

    def __mul__( self, other: float | Vector ):
        """Scalar multiplication for numbers, dot product for vectors."""
        if isinstance(other, (float, int) ):
            scaled = [c * other for c in self.__components]
            return lowerCAmelCase_(scaled )
        elif isinstance(other, lowerCAmelCase_ ) and len(self ) == len(other ):
            size = len(self )
            products = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(products )
        else:  # error case
            raise Exception('''invalid operand!''' )

    def copy( self ):
        """Return an independent copy of this vector."""
        return lowerCAmelCase_(self.__components )

    def component( self, i: int ):
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('''index out of range''' )

    def change_component( self, pos: int, value: float ):
        """Set the component at ``pos`` to ``value``."""
        assert -len(self.__components ) <= pos < len(self.__components )
        # BUG FIX: the original assigned `value` to a local variable, so the
        # vector was never modified.
        self.__components[pos] = value

    def euclidean_length( self ):
        """Return the Euclidean (L2) norm of the vector."""
        if len(self.__components ) == 0:
            raise Exception('''Vector is empty''' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )

    def angle( self, other: Vector, deg: bool = False ):
        """Return the angle to ``other`` in radians (degrees if ``deg``)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def _a ( lowerCamelCase_ ):
    """Return the zero vector of the given integer dimension.

    BUGFIX: the obfuscated code asserted ``isinstance(x, x)``, which raises
    TypeError; the check against ``int`` is restored.
    NOTE(review): ``Vector`` is not defined in this module (the class above was
    renamed by obfuscation) — TODO restore the class's real name.
    """
    assert isinstance(lowerCamelCase_, int )
    return Vector([0] * lowerCamelCase_ )
def _a ( dimension, pos ):
    """Return the unit basis vector e_pos of R^dimension.

    BUGFIX: the obfuscated signature had two identical parameter names (a
    SyntaxError) and the ``ans[pos] = 1`` write was collapsed into a plain
    rebinding; both are restored.
    NOTE(review): ``Vector`` is not defined in this module — TODO restore.
    """
    assert isinstance(dimension, int ) and isinstance(pos, int )
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def _a ( scalar, x, y ):
    """Return the linear combination ``x * scalar + y`` (classic axpy).

    BUGFIX: the obfuscated signature repeated one parameter name three times
    (a SyntaxError) and asserted ``isinstance(v, v)`` (TypeError).
    NOTE(review): ``Vector`` is not defined in this module — TODO restore.
    """
    assert (
        isinstance(x, Vector )
        and isinstance(y, Vector )
        and (isinstance(scalar, (int, float) ))
    )
    return x * scalar + y
def _a ( n, a, b ):
    """Return a vector of ``n`` random integers drawn from [a, b].

    BUGFIX: the obfuscated signature repeated one parameter name (SyntaxError).
    NOTE(review): obfuscation lost which value originally seeded the RNG;
    seeding with ``n`` keeps the result deterministic — TODO confirm.
    NOTE(review): ``Vector`` is not defined in this module — TODO restore.
    """
    random.seed(n )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
class lowerCAmelCase_ :
    """A ``w`` x ``h`` real matrix stored as a list of row lists.

    NOTE(review): the obfuscated original had three identical ``__init__``
    parameter names (a SyntaxError) and collapsed every accessor into one
    method name; the names below are restored to the ones this module's own
    code calls (``width``, ``height``, ``component``, ``change_component``,
    ``minor``, ``cofactor``, ``determinant``).
    """

    def __init__( self, matrix: list[list[float]], w: int, h: int ):
        """Store the rows plus width/height bookkeeping."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__( self ):
        """Render one ``|a,b,...|`` row per line (with trailing newline)."""
        ans = ""
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans

    def __add__( self, other: Matrix ):
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            rows = []
            for i in range(self.__height ):
                rows.append(
                    [self.__matrix[i][j] + other.component(i, j ) for j in range(self.__width )] )
            return type(self )(rows, self.__width, self.__height )
        raise Exception("matrix must have the same dimension!" )

    def __sub__( self, other: Matrix ):
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            rows = []
            for i in range(self.__height ):
                rows.append(
                    [self.__matrix[i][j] - other.component(i, j ) for j in range(self.__width )] )
            return type(self )(rows, self.__width, self.__height )
        raise Exception("matrices must have the same dimension!" )

    @overload
    def __mul__( self, other: float ):
        ...

    @overload
    def __mul__( self, other: Vector ):
        ...

    def __mul__( self, other: float | Vector ):
        """Matrix-scalar product for numbers, matrix-vector product otherwise."""
        if isinstance(other, (int, float) ):  # matrix-scalar
            rows = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return type(self )(rows, self.__width, self.__height )
        # matrix-vector: duck-typed, because the original isinstance check was
        # destroyed by obfuscation and `Vector` is not defined here.
        if len(other ) == self.__width:
            # NOTE(review): `zero_vector` must exist at module level — the
            # original helper was renamed by obfuscation; TODO restore it.
            ans = zero_vector(self.__height )
            for i in range(self.__height ):
                prods = [
                    self.__matrix[i][j] * other.component(j )
                    for j in range(self.__width )
                ]
                ans.change_component(i, sum(prods ) )
            return ans
        raise Exception(
            "vector must have the same size as the "
            "number of columns of the matrix!" )

    def height( self ):
        """Number of rows."""
        return self.__height

    def width( self ):
        """Number of columns."""
        return self.__width

    def component( self, x: int, y: int ):
        """Return entry (x, y); raises for out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        # message kept byte-for-byte from the original code
        raise Exception("change_component: indices out of bounds" )

    def change_component( self, x: int, y: int, value: float ):
        """Set entry (x, y) to ``value``; raises for out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            # BUGFIX: the write now actually targets the matrix entry.
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds" )

    def minor( self, x: int, y: int ):
        """Determinant of the submatrix with row ``x`` and column ``y`` removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        minor_rows = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor_rows ) ):
            minor_rows[i] = minor_rows[i][:y] + minor_rows[i][y + 1 :]
        return type(self )(minor_rows, self.__width - 1, self.__height - 1 ).determinant()

    def cofactor( self, x: int, y: int ):
        """Signed minor (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y )
        raise Exception("Indices out of bounds" )

    def determinant( self ):
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        if self.__height < 1:
            raise Exception("Matrix has no element" )
        if self.__height == 1:
            return self.__matrix[0][0]
        if self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        cofactor_prods = [
            self.__matrix[0][y] * self.cofactor(0, y ) for y in range(self.__width )
        ]
        return sum(cofactor_prods )
def _a ( lowerCamelCase_ ):
    """Return the ``n`` x ``n`` zero matrix.

    BUGFIX: the obfuscated body referenced an undefined name ``n`` and passed
    the dimension integer where the row data belongs.
    NOTE(review): ``Matrix`` is not defined in this module — TODO restore.
    """
    ans: list[list[float]] = [[0] * lowerCamelCase_ for _ in range(lowerCamelCase_ )]
    return Matrix(ans, lowerCamelCase_, lowerCamelCase_ )
def _a ( w, h, a, b ):
    """Return a ``w`` x ``h`` matrix of random integers drawn from [a, b].

    BUGFIX: the obfuscated signature repeated one parameter name four times
    (a SyntaxError).
    NOTE(review): obfuscation lost which value originally seeded the RNG;
    seeding with ``w`` keeps the result deterministic — TODO confirm.
    NOTE(review): ``Matrix`` is not defined in this module — TODO restore.
    """
    random.seed(w )
    rows: list[list[float]] = [
        [random.randint(a, b ) for _ in range(w )] for _ in range(h )
    ]
    return Matrix(rows, w, h )
| 136 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _a ( lowerCamelCase_ ):
    """Moebius function mu(n): 0 for non-square-free n, otherwise
    (-1)^k where k is the number of prime factors."""
    factor_list = prime_factors(lowerCamelCase_ )
    # A squared prime factor forces mu(n) = 0.
    if not is_square_free(factor_list ):
        return 0
    # Square-free: sign depends on the parity of the factor count.
    return -1 if len(factor_list ) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 136 | 1 |
# NOTE(review): this list enumerates the names re-exported by the imports
# below; in the upstream module it is assigned to `__all__`.  Under the
# obfuscated name `lowerCAmelCase__` Python will NOT treat it as an export
# list — TODO confirm and restore `__all__`.
lowerCAmelCase__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 321 |
'''simple docstring'''
def __lowerCamelCase ( arr , required_sum ) -> bool:
    """Classic subset-sum DP: True iff some subset of ``arr`` sums to
    ``required_sum``.

    BUGFIX: the obfuscated signature used the same name for both parameters
    (a SyntaxError) and every ``subset[i][j] = ...`` write was collapsed into
    a plain local rebinding, so the DP table never filled and the function
    always returned False.
    """
    arr_len = len(arr )
    # subset[i][j] is True iff some subset of the first i items sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # A sum of zero is always reachable (take no element).
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # A positive sum is unreachable from the empty prefix.
    for j in range(1 , required_sum + 1 ):
        subset[0][j] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                # Item too large: carry the answer over.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the item or take it.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 430 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
# NOTE(review): upstream this is `class TFGenerationMixin(TFGenerationMixin)` —
# a shim that re-exports the class from its new location while warning on
# import.  Obfuscation replaced both the base class and the warning category
# (presumably FutureWarning) with the undefined name `__lowerCAmelCase`,
# which also gets name-mangled inside the class body — TODO restore.
class _UpperCamelCase ( __lowerCAmelCase ):
    """Deprecated import location shim; emits a deprecation warning at import time."""
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , __lowerCAmelCase , )
| 715 |
'''simple docstring'''
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict):
'''simple docstring'''
__lowercase =(0, 0)
__lowercase =None
__lowercase =0
__lowercase =0
__lowercase =0
def __eq__( self : List[str] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.position == cell.position
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
print(self.position)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : List[str]=(5, 5)):
'''simple docstring'''
__lowercase =np.zeros(_lowerCAmelCase)
__lowercase =world_size[0]
__lowercase =world_size[1]
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
print(self.w)
def __lowerCamelCase ( self : int , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowercase =cell.position[0]
__lowercase =cell.position[1]
__lowercase =[]
for n in neughbour_cord:
__lowercase =current_x + n[0]
__lowercase =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowercase =Cell()
__lowercase =(x, y)
__lowercase =cell
neighbours.append(_lowerCAmelCase)
return neighbours
def _A ( world , start , goal ):
    """A* search over ``world`` from ``start`` to ``goal``.

    Returns the list of positions from start to goal (inclusive).

    BUGFIX: the obfuscated signature repeated one parameter name three times
    (a SyntaxError), and the ``n.g``/``n.h``/``n.f`` attribute writes were
    collapsed into locals with both coordinate pairs unpacked into one name
    (making the heuristic identically zero).
    """
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Pop the open node with the smallest f-score.
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # NOTE(review): this closed-list check is a no-op (`continue` only
            # skips the inner loop) — preserved from the original; verify.
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared-distance heuristic to the goal.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # NOTE(review): same no-op pattern for the open list — preserved.
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    # Walk parent links back to the start, then reverse.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): `Gridworld` and `Cell` are not defined in this module —
    # obfuscation renamed both classes to `_UpperCamelCase`, so this demo
    # raises NameError as written; TODO restore the class names.
    lowerCamelCase = Gridworld()
    # Start position and goal
    lowerCamelCase = Cell()
    lowerCamelCase = (0, 0)
    lowerCamelCase = Cell()
    lowerCamelCase = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    lowerCamelCase = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowerCamelCase = 1
    print(world.w)
| 454 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE_ = 'scheduler_config.json'
# NOTE(review): upstream this is the `FlaxKarrasDiffusionSchedulers` Enum with
# five named members; obfuscation replaced the base (presumably `Enum`) and
# all member names with `__lowerCAmelCase`, so the assignments below overwrite
# each other — TODO restore.
class a ( __lowerCAmelCase ):
    """Enumeration of the Flax Karras-style diffusion schedulers (values 1-5)."""
    __lowerCAmelCase = 1
    __lowerCAmelCase = 2
    __lowerCAmelCase = 3
    __lowerCAmelCase = 4
    __lowerCAmelCase = 5
__lowerCAmelCase = 5
@dataclass
# NOTE(review): upstream this is `FlaxSchedulerOutput(BaseOutput)` with a
# single `prev_sample: jnp.ndarray` field; the base class and the field name
# were destroyed by obfuscation — TODO restore.
class a ( __lowerCAmelCase ):
    """Output container for a scheduler step (holds the previous sample)."""
    __lowerCAmelCase = 42
# NOTE(review): upstream this is `FlaxSchedulerMixin` (loads/saves scheduler
# configs via ConfigMixin machinery).  Obfuscation collapsed the four class
# attributes (config_name, ignore_for_config, _compatibles, has_compatibles)
# into one mangled name and gave every method the same name `lowercase_`, so
# only the last definition of each survives; several locals referenced below
# (`scheduler`, `state`, `unused_kwargs`, `compatible_classes_str`,
# `compatible_classes`) are never bound — TODO restore from upstream.
class a :
    """Mixin giving schedulers from_pretrained/save_pretrained-style config handling."""
    __lowerCAmelCase = SCHEDULER_CONFIG_NAME
    __lowerCAmelCase = ["""dtype"""]
    __lowerCAmelCase = []
    __lowerCAmelCase = True
    @classmethod
    def lowercase_ ( cls , snake_case_ = None , snake_case_ = None , snake_case_=False , **snake_case_ , ):
        """Load a scheduler config and instantiate the scheduler (from_pretrained)."""
        # Loads the JSON config, then builds the scheduler from it.
        __UpperCAmelCase, __UpperCAmelCase: List[str] = cls.load_config(
            pretrained_model_name_or_path=snake_case_ , subfolder=snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ , )
        __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = cls.from_config(snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ )
        # Stateful Flax schedulers also get their initial state created here.
        if hasattr(snake_case_ , """create_state""" ) and getattr(snake_case_ , """has_state""" , snake_case_ ):
            __UpperCAmelCase: Union[str, Any] = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def lowercase_ ( self , snake_case_ , snake_case_ = False , **snake_case_ ):
        """Persist the scheduler configuration to a directory (save_pretrained)."""
        self.save_config(save_directory=snake_case_ , push_to_hub=snake_case_ , **snake_case_ )
    @property
    def lowercase_ ( self ):
        """Schedulers this one is interchangeable with."""
        return self._get_compatibles()
    @classmethod
    def lowercase_ ( cls ):
        """Resolve compatible scheduler classes from the package root module."""
        __UpperCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
        __UpperCAmelCase: Union[str, Any] = importlib.import_module(__name__.split(""".""" )[0] )
        __UpperCAmelCase: Union[str, Any] = [
            getattr(snake_case_ , snake_case_ ) for c in compatible_classes_str if hasattr(snake_case_ , snake_case_ )
        ]
        return compatible_classes
def UpperCamelCase__ ( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape`` by right-padding its shape with singleton
    axes (so existing axes align from the left).

    BUGFIX: the obfuscated signature used the same name for both parameters,
    which is a SyntaxError.
    """
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def UpperCamelCase__ ( _lowercase : int , _lowercase : List[str]=0.9_99 , _lowercase : List[Any]=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(_lowercase : Optional[Any] ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__UpperCAmelCase: Tuple = []
for i in range(_lowercase ):
__UpperCAmelCase: Any = i / num_diffusion_timesteps
__UpperCAmelCase: Tuple = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowercase ) / alpha_bar(_lowercase ) , _lowercase ) )
return jnp.array(_lowercase , dtype=_lowercase )
@flax.struct.dataclass
class a :
    """Frozen container for the common diffusion schedule arrays.

    NOTE(review): obfuscation collapsed the three field names; they are
    restored from the keyword arguments used in the constructor call below
    and from the ``state.alphas_cumprod`` reads elsewhere in this module.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def lowercase_ ( cls , scheduler ):
        """Build the schedule arrays from a scheduler's config.

        NOTE(review): upstream this classmethod is named ``create``; the
        obfuscated name ``lowercase_`` is kept to avoid breaking callers.
        """
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            # NOTE(review): `betas_for_alpha_bar` was renamed by obfuscation at
            # module level — TODO restore the helper's name.
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=alphas and None or betas , alphas_cumprod=alphas_cumprod , ) if False else cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def UpperCamelCase__ ( state , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Return (sqrt(alpha_bar_t), sqrt(1 - alpha_bar_t)) broadcast to the
    sample shape for the given timesteps.

    BUGFIX: the obfuscated signature repeated one parameter name four times,
    which is a SyntaxError.
    NOTE(review): `broadcast_to_shape_from_left` was renamed by obfuscation at
    module level — TODO restore the helper's name.
    """
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase__ ( state , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Forward-diffuse: mix clean samples with noise at the given timesteps.

    BUGFIX: the obfuscated signature repeated one parameter name four times
    (a SyntaxError).
    NOTE(review): `get_sqrt_alpha_prod` was renamed by obfuscation at module
    level — TODO restore the helper's name.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def UpperCamelCase__ ( state , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """v-prediction target: v = sqrt(alpha_bar)*noise - sqrt(1-alpha_bar)*sample.

    BUGFIX: the obfuscated signature repeated one parameter name four times
    (a SyntaxError), and dataset-table residue ("| 523 |") was fused onto the
    return statement; both are removed.
    NOTE(review): `get_sqrt_alpha_prod` was renamed by obfuscation at module
    level — TODO restore the helper's name.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = 'RegNetConfig'
# Base docstring
SCREAMING_SNAKE_CASE_ = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE_ = [1, 10_88, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE_ = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( nn.Module ):
    """Convolution + BatchNorm + optional activation block (RegNetConvLayer).

    BUGFIX: obfuscation mangled the torch API names (`nn.Convad` /
    `nn.BatchNormad` do not exist → restored to nn.Conv2d / nn.BatchNorm2d),
    repeated the `__init__` parameter name six times (a SyntaxError), dropped
    the `self.` prefixes that the forward pass reads, and renamed `forward`
    (which nn.Module's __call__ dispatches to) to `lowercase_`.
    """

    def __init__( self , in_channels , out_channels , kernel_size=3 , stride=1 , groups=1 , activation="relu" ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels ,
            out_channels ,
            kernel_size=kernel_size ,
            stride=stride ,
            padding=kernel_size // 2 ,  # "same" padding for odd kernels
            groups=groups ,
            bias=False ,  # BN follows, so no conv bias (standard; TODO confirm)
        )
        self.normalization = nn.BatchNorm2d(out_channels )
        # `activation=None` skips the non-linearity entirely.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward( self , hidden_state ):
        """Apply conv → batch-norm → activation."""
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class a ( nn.Module ):
    """RegNet stem: a strided conv embedding with a channel-count sanity check.

    BUGFIX: the obfuscated `__init__` assigned locals instead of the
    `self.embedder` / `self.num_channels` attributes that the forward pass
    reads, and `forward` had been renamed so nn.Module never dispatched to it.
    """

    def __init__( self , config ):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        """Embed pixel values; raises if the channel dim mismatches the config."""
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class a ( nn.Module ):
    """1x1 strided conv + BatchNorm used to project residual shortcuts.

    BUGFIX: restored torch API names (`nn.Convad`→`nn.Conv2d`,
    `nn.BatchNormad`→`nn.BatchNorm2d`), the duplicated `__init__` parameter
    names (a SyntaxError), the `self.` attribute assignments, and the
    `forward` method name.
    """

    def __init__( self , in_channels , out_channels , stride=2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , hidden_state ):
        """Project and normalize the residual branch."""
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class a ( nn.Module ):
    """Squeeze-and-Excitation layer: global pool → bottleneck MLP → sigmoid gate.

    BUGFIX: restored torch API names (`nn.AdaptiveAvgPoolad`→
    `nn.AdaptiveAvgPool2d`, `nn.Convad`→`nn.Conv2d`), the `self.` attribute
    assignments the forward pass reads, and the `forward` method name.
    """

    def __init__( self , in_channels , reduced_channels ):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )

    def forward( self , hidden_state ):
        """Rescale each channel by its learned attention weight."""
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
class a ( nn.Module ):
    """RegNet X residual layer: 1x1 → grouped 3x3 → 1x1 convs plus a shortcut.

    BUGFIX: the obfuscated `__init__` repeated one parameter name (a
    SyntaxError) and assigned locals instead of the `self.shortcut` /
    `self.layer` / `self.activation` attributes that the forward pass reads;
    `forward` was renamed away from the name nn.Module dispatches to.
    """

    def __init__( self , config , in_channels , out_channels , stride=1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) ,
            RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) ,
            # final conv has no activation; the residual sum is activated below
            RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]

    def forward( self , hidden_state ):
        """Residual block: activation(layer(x) + shortcut(x))."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class a ( nn.Module ):
    """RegNet Y residual layer: like the X layer plus Squeeze-and-Excitation.

    BUGFIX: same obfuscation damage as the X layer — duplicated `__init__`
    parameter names (SyntaxError), lost `self.` attribute assignments, and a
    renamed `forward` — all restored.
    """

    def __init__( self , config , in_channels , out_channels , stride=1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) ,
            RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) ,
            RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) ,
            RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]

    def forward( self , hidden_state ):
        """Residual block: activation(layer(x) + shortcut(x))."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class a ( nn.Module ):
    """A RegNet stage: `depth` residual layers; the first one downsamples.

    BUGFIX: the obfuscated `__init__` repeated one parameter name (a
    SyntaxError) and never assigned `self.layers`, which the forward pass
    reads; `forward` was renamed away from the nn.Module dispatch name.
    """

    def __init__( self , config , in_channels , out_channels , stride=2 , depth=2 ):
        super().__init__()
        # X vs Y layer choice comes from the config.
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) ,
            *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )

    def forward( self , hidden_state ):
        """Run the stage's layer stack."""
        hidden_state = self.layers(hidden_state )
        return hidden_state
class a ( nn.Module ):
    """RegNet encoder: the sequence of stages, with optional hidden-state capture.

    BUGFIX: the obfuscated `__init__` never assigned `self.stages` (which both
    the loop below and the forward pass use) and `forward` was renamed away
    from the name nn.Module dispatches to.
    """

    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first
        # stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states=False , return_dict=True ):
        """Run all stages; optionally collect each stage's input plus the output."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class a ( PreTrainedModel ):
    """RegNet base `PreTrainedModel`: weight init + gradient checkpointing hooks.

    BUGFIX: the obfuscated version collapsed the four class attributes into one
    mangled name, gave both hook methods the same name, and used an undefined
    base class; restored to the `PreTrainedModel` imported above and the
    attribute/method names its machinery requires.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        """Kaiming-init convs; unit-weight/zero-bias norms."""
        # BUGFIX: nn.Convad → nn.Conv2d (obfuscated torch name).
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        """Toggle gradient checkpointing on the base model."""
        # NOTE(review): `RegNetModel` is not defined under that name in this
        # obfuscated module — TODO restore the model class's name.
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value
SCREAMING_SNAKE_CASE_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    # NOTE(review): the second argument was REGNET_START_DOCSTRING upstream;
    # obfuscation replaced it with an undefined name — TODO restore.
    """The bare RegNet model outputting raw features without any specific head on top.""" , __lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a ( __lowerCAmelCase ):
    """Bare RegNet backbone: embeddings → encoder → global average pooler.

    BUGFIX: the obfuscated `__init__` assigned locals instead of the
    `self.config` / `self.embedder` / `self.encoder` / `self.pooler`
    attributes the forward pass reads, `forward` was renamed, and its
    signature repeated one parameter name (a SyntaxError).
    """

    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        # BUGFIX: nn.AdaptiveAvgPoolad → nn.AdaptiveAvgPool2d.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states=None , return_dict=None ):
        """Encode pixel values; returns (last_hidden_state, pooled) or a ModelOutput."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ ,
    # NOTE(review): upstream this was REGNET_START_DOCSTRING; obfuscation left
    # an undefined name — TODO restore.
    __lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a ( __lowerCAmelCase ):
    """RegNet backbone plus a linear image-classification head.

    BUGFIX: the obfuscated `__init__` assigned locals instead of the
    `self.num_labels` / `self.regnet` / `self.classifier` attributes the
    forward pass reads; `forward` was renamed, its signature repeated one
    parameter name (a SyntaxError), the problem-type writes lost their
    `self.config.` target, and dataset-table residue ("| 523 | 1 |") was fused
    onto the final return statement.
    """

    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values=None , labels=None , output_hidden_states=None , return_dict=None , ):
        """Classify an image; computes a loss when `labels` is given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer the problem type once from label shape/dtype.
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """Parse TensorFlowBenchmarkArguments from the CLI and run the benchmark.

    On a ``ValueError`` from the second parse (raised for deprecated ``--no_*`` flags),
    rebuild a helpful error message before re-raising.
    """
    # NOTE(review): originally obfuscated to `SCREAMING_SNAKE_CASE` while the
    # __main__ guard below calls `main()` — restored so the script actually runs.
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on the exception text assumes the parser embeds a
        # Python list literal as the last token — fragile, flagged but kept as-is.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 704 | """simple docstring"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import structure for the (deprecated) VAN model.
# NOTE(review): the obfuscated version bound everything to `_A` (clobbering the dict
# with the list) and then referenced an undefined `_import_structure` — restored.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only exposed when torch is installed.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)

# Root logger for this test module; `logger.addHandler(...)` below depends on this name.
logger = logging.getLogger()
def get_results(output_dir):
    """Load and return the dict stored in ``all_results.json`` inside *output_dir*.

    Raises:
        ValueError: if the file does not exist.
    """
    # NOTE(review): obfuscated as `snake_case_` with every local collapsed to
    # `_snake_case`; the test class below calls `get_results(tmp_dir)`, pinning the name.
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Mirror test output on stdout; the read of `stream_handler` on the next line
# pins this name (the obfuscated source assigned to `__magic_name__` instead).
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __SCREAMING_SNAKE_CASE(TestCasePlus):
    """End-to-end TPU example tests driven through ``xla_spawn``."""

    # NOTE(review): both methods were obfuscated to `UpperCamelCase` (the second
    # shadowing the first) and locals were unbound; restored to runnable tests.
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
    ./tests/test_trainer_tpu.py
    --num_cores=8
    ./tests/test_trainer_tpu.py
    """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 672 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)

# Root logger for this test module; `logger.addHandler(...)` below depends on this name.
logger = logging.getLogger()
def A_ ( snake_case__ , snake_case__ ) -> Optional[Any]:
_UpperCamelCase :Optional[int] = '''\n'''.join(snake_case__ )
Path(snake_case__ ).open('''w''' ).writelines(snake_case__ )
# Tiny checkpoints used by the eval tests below; the test bodies read these names
# directly (the obfuscated source clobbered all three into one variable).
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class A(TestCasePlus):
    """Smoke tests for ``run_eval.py`` / ``run_eval_search.py`` on tiny models."""

    # NOTE(review): all four methods were obfuscated to `_UpperCamelCase`
    # (shadowing each other); the internal call `self.run_eval_tester(...)` and
    # unittest discovery pin the restored names.
    def run_eval_tester(self, model):
        """Run the generate script once for *model* and check the output file appears."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run a beam-search sweep and check the summary table printed to stdout."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 355 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Preprocessing hyper-parameters; the functions and __main__ block below read
# these names directly (the obfuscated source collapsed all five onto one
# variable, clobbering each other and leaving the reads undefined).
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _a ( SCREAMING_SNAKE_CASE_ : str ):
def choose_first(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
__lowerCAmelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__lowerCAmelCase = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
__lowerCAmelCase = {"id": example["id"]}
__lowerCAmelCase = example["annotations"]
__lowerCAmelCase = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
__lowerCAmelCase = ["yes"] if 1 in yes_no_answer else ["no"]
__lowerCAmelCase = __lowerCAmelCase = []
__lowerCAmelCase = __lowerCAmelCase = []
__lowerCAmelCase = ["<cls>"]
else:
__lowerCAmelCase = ["short"]
__lowerCAmelCase = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
__lowerCAmelCase = ["long"]
__lowerCAmelCase = choose_first(annotation["long_answer"] , is_long_answer=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = []
answer.update(SCREAMING_SNAKE_CASE_ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
__lowerCAmelCase = True
else:
__lowerCAmelCase = False
__lowerCAmelCase = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , SCREAMING_SNAKE_CASE_ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and re-index the answer span.

    Returns ``{"context": ..., "answer": {...}}`` where the answer's token
    indices refer to the HTML-free context (end token made inclusive).
    """
    # NOTE(review): obfuscated as `_a` with duplicate parameters (SyntaxError);
    # `get_strided_contexts_and_ans` below calls this name.
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # Shift the span left for every HTML token dropped before it.
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question+context and split the document into overlapping windows.

    Returns ``{"example_id", "input_ids", "labels"}`` where each window carries
    its own (possibly -100 / "null") answer labels.
    """
    # NOTE(review): obfuscated as `_a` with five duplicate parameter names
    # (SyntaxError) and unbound locals; `prepare_inputs` below calls this name.
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            chunk = input_ids[i:end_index]
            inputs.append(q_indices + chunk)
            category.append(answer["category"][0])
            if chunk[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        chunk = input_ids[i:end_index]
        inputs.append(q_indices + chunk)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if chunk[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Map-style wrapper around :func:`get_strided_contexts_and_ans` for ``datasets.map``."""
    # NOTE(review): obfuscated as `_a` with five duplicate parameter names
    # (SyntaxError); the __main__ block maps `prepare_inputs`, pinning the name.
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append each windowed sample of *hf_data* to *file_name* as JSON lines.

    Skips windows without an answer and randomly drops ~60% of "null" windows
    to rebalance the dataset.
    """
    # NOTE(review): obfuscated as `_a` with duplicate parameter names (SyntaxError);
    # the __main__ block calls `save_to_disk(data, file_name=...)`, pinning both names.
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the null windows
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    # NOTE(review): the obfuscated driver bound every value to `UpperCamelCase__`
    # while reading `data`/`tokenizer`/`fn_kwargs`/`cache_file_name` — restored.
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 708 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a__(Pipeline):
    """Image classification pipeline: predicts the class of an image.

    Implements the standard Pipeline hooks (``_sanitize_parameters``,
    ``preprocess``, ``_forward``, ``postprocess``) for both PyTorch and TF.
    """

    # NOTE(review): the four hook methods were all obfuscated to
    # `__SCREAMING_SNAKE_CASE` (shadowing each other); the Pipeline base class
    # dispatches on these exact names, so they are restored. `idalabel` was a
    # digit-mangled `id2label`.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        # Only `top_k` is configurable, and it only affects postprocessing.
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        """Classify the image(s) and return [{"score", "label"}, ...] per image."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # Never ask for more classes than the model has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 552 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone (``maskformer-swin``).

    Stores the Swin architecture hyper-parameters and the backbone stage layout.
    """

    # NOTE(review): the obfuscator stripped `self.` from every attribute
    # assignment (so the config stored nothing) and replaced the `model_type`/
    # `attribute_map` class attributes with `_lowercase = ...` — restored.
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 5 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution transformed by ``y = loc + scale * x``.

    Args:
        base_distribution: the distribution being rescaled.
        loc: additive shift (defaults to 0.0).
        scale: multiplicative scale (defaults to 1.0).
        event_dim: number of rightmost dims treated as event dims.
    """

    # NOTE(review): obfuscated as `class a(UpperCAmelCase)` with `self.scale`/
    # `self.loc` never set though read in `super().__init__`; the read
    # `AffineTransformed(...)` later in this file pins the class name.
    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project hidden features into one linear head per distribution argument,
    then constrain the raw outputs through ``domain_map``."""

    # NOTE(review): obfuscated with attribute assignments losing `self.`; the read
    # `ParameterProjection(...)` later in this file pins the class name, and
    # nn.Module dispatch pins `forward`.
    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One Linear head per distribution argument (e.g. loc, scale).
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` so it composes with other layers."""

    # NOTE(review): the read `LambdaLayer(self.domain_map)` later in this file
    # pins the class name; nn.Module dispatch pins `forward`.
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how network outputs parameterize a distribution.

    Subclasses set ``distribution_class`` and ``args_dim`` and implement
    ``domain_map`` to constrain raw projections to valid parameter domains.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale every argument dimension by the output dimensionality.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        # Wrap in Independent for multivariate (dim > 1) outputs.
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of a single event of the output distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (used for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw projections to each parameter's valid domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth map of the real line onto the positive half-line."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student's t distribution output: parameters (df, loc, scale)."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; df must exceed 2 so the variance exists.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal distribution output: parameters (loc, scale)."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial distribution output: parameters (total_count, logits)."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overrides the parent: the negative binomial must return integers, so instead
    # of an affine transform we fold the scale into the logits.
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 300 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    """hf_hub_url must build the resolve URL with quoted path and default revision."""
    # NOTE(review): the parametrize argnames require matching parameter names —
    # the obfuscated signature would have failed pytest collection; restored.
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return *x* unchanged if it is already iterable, else duplicate it into a pair.

    NOTE(review): the name is a digit-mangled ``to_2tuple``; the call sites in the
    test class below use ``to_atuple``, so that spelling is kept for consistency.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCamelCase_ :
    # Shared test mixin for FlaxVisionTextDualEncoder model combinations.
    # NOTE(review): the class continues past this excerpt; all methods were
    # obfuscated to `__magic_name__` (later defs shadow earlier ones) and local
    # assignment targets were collapsed to `a_`, so many lines below read names
    # (e.g. `diff`, `model`, `output`, `vision_model`) that are never bound.
    # Flagged here, not fixed, because the full class is not visible.
    # Hook: subclasses return (vision_model, text_model) — abstract placeholder.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        pass
    # Hook: subclasses provide inputs — abstract placeholder.
    def __magic_name__ ( self ):
        pass
    # Hook: subclasses provide config — abstract placeholder.
    def __magic_name__ ( self ):
        pass
    # Assert two arrays agree within `tol`.
    # NOTE(review): the max abs difference is assigned to `a_` but the message
    # reads `diff` — unbound name as written.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        a_ = np.abs((a - b) ).max()
        self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    # Check embedding shapes when a model is built from a joint config.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    # Check embedding shapes when built from pretrained vision/text submodels.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        a_ = {"""vision_model""": vision_model, """text_model""": text_model}
        a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    # Check save_pretrained/from_pretrained round-trip preserves outputs (tol 1e-3).
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        a_ = {"""vision_model""": vision_model, """text_model""": text_model}
        a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        a_ = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_SCREAMING_SNAKE_CASE )
            a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            a_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
            a_ = after_output[0]
            a_ = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
    # Check attention tensor shapes for both towers.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        a_ , a_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        a_ = {"""vision_model""": vision_model, """text_model""": text_model}
        a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        a_ = model(
            input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
        a_ = output.vision_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        a_ = to_atuple(vision_model.config.image_size )
        a_ = to_atuple(vision_model.config.patch_size )
        a_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a_ = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        a_ = output.text_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
pt_model.to(_SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
a_ = inputs_dict
a_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
a_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
a_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
pt_model_loaded.to(_SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
a_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4E-2 )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
a_ = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
a_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
a_ = config_inputs_dict.pop("""vision_config""" )
a_ = config_inputs_dict.pop("""text_config""" )
a_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __magic_name__ ( self ):
a_ , a_ = self.get_pretrained_model_and_inputs()
a_ = model_a(**_SCREAMING_SNAKE_CASE )
a_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
a_ = model_a(**_SCREAMING_SNAKE_CASE )
a_ = after_outputs[0]
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = FlaxViTModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
return vision_model, text_model
def __magic_name__ ( self ):
a_ = FlaxViTModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = vit_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ , a_ = vision_config_and_inputs
a_ , a_ , a_ , a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE )
a_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
return vision_model, text_model
def __magic_name__ ( self ):
a_ = FlaxCLIPVisionModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = clip_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ , a_ = vision_config_and_inputs
a_ , a_ , a_ , a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
a_ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
a_ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
a_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) | 403 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __a(unittest.TestCase):
    """Test suite for the fill-mask pipeline (PyTorch and TensorFlow backends)."""

    # Hooks consumed by the pipeline-test mixin: which model classes this suite
    # may be instantiated with, per framework.
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        """Release as much GPU memory occupied by PyTorch as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        """Exact-output checks against a tiny TF checkpoint (deterministic scores/tokens)."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        """Exact-output checks against a tiny PyTorch checkpoint, incl. multi-mask inputs."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")

        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        # Two mask tokens in one input -> one list of candidates per mask.
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        """A half-precision model must still yield postprocessable (float32) outputs."""
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        """Shared assertions for the full-size distilroberta checkpoint (framework-agnostic)."""
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        """The pipeline must work even when the tokenizer defines no pad token."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        """TF variant of the no-pad-token check."""
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        """Mixin hook: build the pipeline under test plus sample inputs."""
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        """Generic checks for any model/tokenizer pair: output schema, batching, error cases."""
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        """`targets=` must restrict candidates, both as pipeline arg and as call arg."""
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        """`top_k` as a pipeline arg and as a call arg must produce identical results."""
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        """`top_k` combined with `targets` must filter consistently."""
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        """Duplicate targets must be deduplicated before applying `top_k`."""
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        """Multiple mask tokens in one input yield one candidate list per mask."""
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 109 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_(Enum):
    """Names of the learning-rate schedules this module can build.

    The string values are what callers pass (e.g. on a command line);
    ``SCREAMING_SNAKE_CASE_("linear")`` maps back to the member. Members must
    match the keys of the scheduler-function mapping at the bottom of this
    module.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


# Canonical alias used by the scheduler-function mapping below.
SchedulerType = SCREAMING_SNAKE_CASE_
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        last_epoch: Index of the last epoch when resuming training (-1 = fresh start).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` whose multiplier is always 1, i.e.
        the optimizer's configured learning rate is used unchanged.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a schedule with a constant learning rate after a linear warmup.

    The multiplier increases linearly from 0 to 1 over `num_warmup_steps`,
    then stays at 1.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        last_epoch: Index of the last epoch when resuming training (-1 = fresh start).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # max(1.0, ...) guards against division by zero when num_warmup_steps == 0
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a schedule with a piecewise-constant learning-rate multiplier.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Rule string such as `"1:10,20:0.1,0.01"`, meaning:
            multiplier 10 until step 1, 0.1 until step 20, then 0.01 for the
            remaining steps. The final comma-separated entry (with no colon)
            is the multiplier used after the last boundary.
        last_epoch: Index of the last epoch when resuming training (-1 = fresh start).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries except the last are "boundary_step:multiplier" pairs.
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the final multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule that warms up linearly, then decays linearly to 0.

    The multiplier increases from 0 to 1 over `num_warmup_steps`, then
    decreases linearly to 0 at `num_training_steps`.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        last_epoch: Index of the last epoch when resuming training (-1 = fresh start).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay; clamp at 0 once num_training_steps is exceeded.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Create a schedule that warms up linearly, then follows a cosine decay.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of cosine waves over the decay phase (default 0.5,
            i.e. decrease from the max value to 0 following a half-cosine).
        last_epoch: Index of the last epoch when resuming training (-1 = fresh start).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """
    Create a schedule with a linear warmup followed by a cosine decay with
    several hard restarts (the lr jumps back to the max at each restart).

    Args:
        optimizer: The optimizer whose learning rate will be scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of hard restarts to use.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            # Training finished: keep the learning rate at zero.
            return 0.0
        # The modulo restarts the cosine wave `num_cycles` times over training.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """
    Create a schedule with a linear warmup followed by a polynomial decay from
    the optimizer's initial lr to `lr_end` over `num_training_steps`.

    Args:
        optimizer: The optimizer whose learning rate will be scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        lr_end: The final learning rate after decay.
        power: Exponent of the polynomial decay (1.0 = linear).
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If `lr_end` is not strictly smaller than the initial lr.

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps every SchedulerType member to the factory function that builds it.
# NOTE: `get_scheduler` below looks this table up by the name
# TYPE_TO_SCHEDULER_FUNCTION, so the dict must be bound to that name.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
# Backward-compatible alias for the previous (mangled) binding.
a__ = TYPE_TO_SCHEDULER_FUNCTION
def get_scheduler(
    name: "Union[str, SchedulerType]",
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """
    Unified entry point to build any supported learning-rate schedule.

    Args:
        name: Name (or SchedulerType) of the schedule to build.
        optimizer: The optimizer whose learning rate will be scheduled.
        step_rules: Rule string, only used by the PIECEWISE_CONSTANT schedule.
        num_warmup_steps: Required by every schedule except CONSTANT and
            PIECEWISE_CONSTANT.
        num_training_steps: Required by schedules that decay the lr.
        num_cycles: Only used by COSINE_WITH_RESTARTS.
        power: Only used by POLYNOMIAL.
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 279 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
# Module-level logger; the retriever methods below log through this name.
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A RAG retriever for distributed training built on ``torch.distributed``.

    Only the main worker holds the index; question hidden states are gathered
    to rank 0, retrieval runs there, and the results are scattered back to all
    workers over a dedicated gloo process group.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        # Dedicated gloo process group for the gather/scatter of retrieval results.
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Initialize the index on the main worker and set up the gloo group."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        # Rank 0 of the retrieval process group is the main worker.
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.floataa):
        """Scatter `scatter_list` from rank 0; every rank receives one tensor of `target_shape`."""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """
        Retrieve `n_docs` documents per query. Outside distributed training this
        simply calls the local retriever; otherwise queries are gathered to the
        main worker, retrieved there, and the results scattered back.
        """
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.floataa) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.intaa)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 704 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion function below logs through this name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under key `old` to key `new`, in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """
    Return a new OrderedDict where every "backbone.0.body" prefix is rewritten
    to the HF "backbone.conv_encoder.model" prefix; all other keys are kept.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """
    Split the fused in-projection matrices of each attention layer into
    separate q/k/v projections, in place. Hidden size is 256, so the fused
    weight rows split as [:256] -> q, [256:512] -> k, [-256:] -> v.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """
    Resize a PIL image so its longest side becomes 800 (detection checkpoints)
    or 1000 (structure-recognition checkpoints), preserving aspect ratio.
    """
    width, height = image.size
    current_max_size = max(width, height)
    # Detection models were trained at max size 800, structure models at 1000.
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Convert an original Table Transformer (DETR-style) checkpoint into the HF
    TableTransformerForObjectDetection format, verify the outputs on a sample
    image, and optionally save the result and/or push it to the hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 506 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from `state_dict`, in place (trailing `_` = mutates)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # pop with a default so absent keys are ignored silently
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear sharing the weight data of an embedding layer."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """
    Return a new dict with fairseq NLLB-MoE parameter names rewritten to the
    HF NllbMoe naming scheme. `expert_idx` renumbers the per-rank expert 0 to
    the global expert index; with None the original expert numbers are kept.
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # fc1/fc2 of dense layers move under .ffn; expert weights were handled above
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """
    Shard a fairseq NLLB-MoE checkpoint into HF-style weight shards.

    One shard is written per expert rank file found, plus a final shard with
    the shared (dense) weights; an index json mapping each parameter to its
    shard is then written. Returns (metadata, index); index is None when only
    the shared shard exists.

    NOTE: `dtype` is accepted for CLI compatibility but not used here —
    tensors are saved in their original dtype.
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        # Rename the provisional "???" shard files now that the total is known.
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    # CLI entry point: shard the fairseq checkpoint, then save an HF config/model.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 19 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """
    Validate a device map for model parallelism.

    Args:
        device_map: Mapping of device -> list of attention-block indices.
        num_blocks: Total number of attention blocks the model has.

    Raises:
        ValueError: If any block is assigned twice, missing, or out of range.
    """
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """
    Evenly split `n_layers` layer indices across `devices`.

    Returns a dict mapping each device to a contiguous chunk of layer indices;
    the last device may receive fewer layers when the split is uneven.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 388 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """
    Build the argument parser for the `accelerate env` command.

    When `subparsers` is given, the parser is registered as the "env"
    subcommand and dispatches to `env_command`; otherwise a standalone
    parser is returned.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """
    Print environment information for bug reports and return it as a dict.

    Collects versions (accelerate / Python / numpy / torch), platform info,
    accelerator availability, system RAM, and the current accelerate config.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    """Standalone entry point: parse CLI args and run `env_command`."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
# Script entry point: exit with the status code returned by `main()`.
if __name__ == "__main__":
    raise SystemExit(main())
| 539 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """
    Project Euler problem 2: return the sum of the even-valued Fibonacci
    terms that do not exceed `n`.

    Args:
        n: Upper bound (inclusive) for the Fibonacci terms considered.

    Returns:
        Sum of even Fibonacci numbers <= n.
    """
    total = 0
    # Walk the Fibonacci sequence starting at 1, 2; only even terms are summed.
    a, b = 1, 2
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total
# Print the result for the default limit when run as a script.
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 539 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.