| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 to 55.2k chars) | int64 (0 to 349) | string (135 to 49.1k chars) | int64 (0 to 349) | int64 (0 or 1) |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
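The point of routing everything through `_LazyModule` is that `import transformers.models.biogpt` stays cheap: PyTorch and the modeling file are only imported when one of the listed attributes is first accessed. A minimal, self-contained sketch of the idea (an illustration, not the actual `_LazyModule` implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Sketch: resolve attributes to submodules on first access, then cache them.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value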
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
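Since the config is a plain container, a quick usage check only needs the constructor above; the import assumes a transformers release that ships this model (v4.25+):

from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig(hidden_size=512, num_hidden_layers=6)
print(config.hidden_size)   # 512
print(config.model_type)    # "roberta-prelayernorm"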
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
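Each command plugs itself into the parser via `register_subcommand` and stores its own factory in `args.func`, so `main()` never needs to know the command list. A self-contained sketch of the same pattern (the names here are illustrative, not the diffusers API):

from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello", help="print a greeting")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=HelloCommand)  # parse_args() exposes this as args.func

    def __init__(self, args):
        self.args = args

    def run(self):
        print(f"hello, {self.args.name}")


parser = ArgumentParser("demo-cli")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "diffusers"])
args.func(args).run()  # prints: hello, diffusers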
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
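Unlike most configs, `num_hidden_layers` here ends up as a dict because LXMERT runs three separate stacks (language, cross-modality, vision). A quick illustration, assuming only the class above:

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)
# {'vision': 5, 'cross_encoder': 5, 'language': 9}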
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a positive number whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a '<token> <count>' dictionary file and append its tokens to the encoder."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
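The `@@ ` suffix produced by `bpe()` marks a subword that continues into the next token, which is why `convert_tokens_to_string` can detokenize with a single string replacement. A tiny standalone check (the token split here is hypothetical):

tokens = ["xin@@", "chào"]  # hypothetical BPE pieces for "xinchào"
text = " ".join(tokens).replace("@@ ", "").strip()
print(text)  # xinchào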
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
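The reduce-over-strings lambda keeps every intermediate value a string (the comment notes mypy's trouble with reduce); on Python 3.8+ the same window product reads more directly with math.prod. A sketch equivalent to solution():

from math import prod

def solution_prod(n: str = N) -> int:
    # product of each 13-digit window, maximized over all windows
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))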
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
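As a sanity check on the expected 800x1066 shape in the slow tests: the pretrained processor resizes the shorter side to 800 (with the longest edge capped at 1333), so the 480x640 COCO image scales by 800/480:

h, w = 480, 640          # original image size
scale = 800 / min(h, w)
print(int(h * scale), int(w * scale))  # 800 1066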
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
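A minimal usage sketch, assuming the diffusers package is installed; "google/ddpm-cat-256" is a public checkpoint whose scheduler the pipeline converts to DDIM as shown above:

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cat-256")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")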
def sylvester(number: int) -> int:
    """Return the number-th term of Sylvester's sequence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
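First terms for a quick check: each term is one more than the product of all previous terms, so sylvester(1..4) gives 2, 3, 7, 43:

assert [sylvester(n) for n in range(1, 5)] == [2, 3, 7, 43]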
from ...configuration_utils import PretrainedConfig

TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
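That final re-cast matters because JSON object keys are always strings, so a config loaded from disk would otherwise carry {"0": ...} instead of {0: ...}. For illustration, with the four aggregation operations used by the WTQ-style checkpoints (label names here follow that convention):

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
)
print(config.aggregation_labels)  # {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}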
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
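Outside the test harness, the same reader/writer pair round-trips a dataset in a few lines (a sketch, assuming the datasets package; ds.to_json is the equivalent public API):

from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
ds_back = JsonDatasetReader("out.jsonl").read()
print(ds_back.column_names)  # ['col_1', 'col_2']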
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter
            # for our new tokenizer and that model does not have a tokenizer.json.
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
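    # The next tests exercise trust_remote_code: loading a tokenizer whose class is
    # defined in the Hub repo itself rather than shipped inside the transformers library.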
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 314
| 1
|
def solution(power = 10_00):
    '''simple docstring'''
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
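# A quick sanity check: solution(15) should return 26, since 2**15 = 32768
# and 3 + 2 + 7 + 6 + 8 == 26.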
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
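# Round-trip tests for VisionTextDualEncoderProcessor: a tokenizer and an image
# processor saved together via save_pretrained should reload with the same vocab
# and the same image-processor configuration.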
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
        SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
from math import factorial
def solution(num = 1_00):
    '''simple docstring'''
    return sum(int(x) for x in str(factorial(num)))
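# For example, solution(10) returns 27: 10! == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.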
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 314
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
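# A minimal usage sketch, assuming the class above maps to the upstream NezhaConfig
# (the class name here is an obfuscated placeholder shared across this dump):
#     config = UpperCAmelCase__()                   # all defaults: vocab_size=21128, hidden_size=768
#     config.save_pretrained('''./nezha-config''')  # writes config.json for later reloading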
| 314
| 1
|
def count_inversions_bf(arr):
    '''simple docstring'''
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
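# The brute-force count runs in O(n**2); e.g. count_inversions_bf([3, 2, 1]) == 3,
# one inversion for each of the pairs (3, 2), (3, 1) and (2, 1).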
def count_inversions_recursive(arr):
    '''simple docstring'''
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    '''simple docstring'''
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p)
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
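# The recursive version mirrors merge sort: every merge step above is linear and
# the recursion halves the array, so the total work is O(n log n).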
def main():
    '''simple docstring'''
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf)
if __name__ == "__main__":
main()
| 314
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __getstate__( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = self.__dict__['''_tokenizer'''].get_vocab()
SCREAMING_SNAKE_CASE__ = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) )
def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , **__lowerCamelCase : Tuple , ) -> int:
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
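# A minimal usage sketch, assuming the class above maps to the upstream
# RoFormerTokenizerFast (model ids taken from the pretrained map above):
#     tokenizer = UpperCAmelCase__.from_pretrained('''junnyu/roformer_chinese_base''')
#     tokenizer.tokenize('''今天天气非常好。''')   # jieba-driven pre-tokenization at runtime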
| 314
| 1
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
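# Exercises BeitImageProcessor on PIL images, numpy arrays and torch tensors,
# including segmentation maps, which must come back as torch.long tensors with
# values in [0, 255] (or [0, 150] for raw ADE20k labels).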
class BeitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : str=7 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=18 , __lowerCamelCase : str=30 , __lowerCamelCase : List[str]=400 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=False , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''height''': 20, '''width''': 20}
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_reduce_labels
def lowercase_ ( self : Tuple ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    '''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image = Image.open(dataset[0]['''file'''] )
    map = Image.open(dataset[1]['''file'''] )
    return image, map
def prepare_semantic_batch_inputs():
    '''simple docstring'''
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image1 = Image.open(ds[0]['''file'''] )
    map1 = Image.open(ds[1]['''file'''] )
    image2 = Image.open(ds[2]['''file'''] )
    map2 = Image.open(ds[3]['''file'''] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = BeitImageProcessor if is_vision_available() else None
def lowercase_ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = BeitImageProcessingTester(self )
@property
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
def lowercase_ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
pass
def lowercase_ ( self : Tuple ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : int ) -> Tuple:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : List[Any] ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 314
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
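# A minimal usage sketch, assuming the class above maps to the upstream
# TrajectoryTransformerConfig and the mapping dict above is its attribute_map:
#     config = UpperCAmelCase__(n_layer=4, n_head=4)
#     assert config.num_hidden_layers == 4   # resolved through the attribute map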
| 314
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
    _import_structure['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
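# The _LazyModule indirection defers the heavy torch/vision imports until an
# attribute is first accessed, so importing the package stays cheap; the
# TYPE_CHECKING branch above gives static type checkers the real symbols.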
| 314
|
def solution(limit = 1_00_00_00):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"{solution() = }")
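# e.g. solution(8) == 21: the count of reduced proper fractions with denominator
# d <= 8 equals phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4.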
| 314
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
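# MAPPING translates fairseq parameter names into their transformers counterparts;
# entries containing * get the encoder layer index substituted in at load time.
# TOP_LEVEL_KEYS are the few targets that live outside the wav2vec2_conformer prefix.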
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act='''swish''' )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = '''rotary'''
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
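# Once the script above has run, the dump folder is a regular Transformers
# checkpoint. A minimal loading sketch ("./wav2vec2-conformer-hf" is a
# hypothetical --pytorch_dump_folder_path, not a published model):
from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-conformer-hf")
model = Wav2Vec2ConformerForCTC.from_pretrained("./wav2vec2-conformer-hf")
model.eval()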
| 314
|
import numpy as np
from PIL import Image
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
SCREAMING_SNAKE_CASE__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
SCREAMING_SNAKE_CASE__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
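# Worked example of the sliding-window max pooling above on a 4x4 matrix
# (self-contained sketch with size=2, stride=2; does not call the functions
# defined above):
import numpy as np

arr = np.arange(16).reshape(4, 4)
size, stride = 2, 2
out_dim = (arr.shape[0] - size) // stride + 1
pooled = np.zeros((out_dim, out_dim))
for mi, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
    for mj, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
        # each output cell is the maximum of one size x size window
        pooled[mi, mj] = arr[i : i + size, j : j + size].max()
print(pooled)  # [[ 5.  7.] [13. 15.]]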
| 314
| 1
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return str(_A ) == str(_A )[::-1]
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return int(_A ) + int(str(_A )[::-1] )
def UpperCAmelCase_ ( _A = 1_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for num in range(1 , _A ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = num
while iterations < 50:
SCREAMING_SNAKE_CASE__ = sum_reverse(_A )
iterations += 1
if is_palindrome(_A ):
break
else:
lychrel_nums.append(_A )
return len(_A )
if __name__ == "__main__":
print(F"{solution() = }")
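# Quick standalone check of the reverse-and-add step used above: 349 reaches
# a palindrome in three iterations (349 -> 1292 -> 4213 -> 7337), so it is
# not a Lychrel candidate.
n = 349
for _ in range(50):
    n = n + int(str(n)[::-1])
    if str(n) == str(n)[::-1]:
        break
print(n)  # 7337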
| 314
|
from __future__ import annotations
def UpperCAmelCase_ ( _A , _A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = word_bank or []
# create a table
SCREAMING_SNAKE_CASE__ = len(_A ) + 1
SCREAMING_SNAKE_CASE__ = []
for _ in range(_A ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_A ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_A )] == word:
SCREAMING_SNAKE_CASE__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_A )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_A )]:
combination.reverse()
return table[len(_A )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
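# Tiny standalone trace of the same table-filling idea for target "ab" and
# word bank ["a", "b", "ab"] (a sketch mirroring the function above, except
# that it appends words in forward order, so no final reverse is needed):
target, word_bank = "ab", ["a", "b", "ab"]
table = [[] for _ in range(len(target) + 1)]
table[0] = [[]]  # the empty prefix has exactly one (empty) decomposition
for i in range(len(target) + 1):
    for word in word_bank:
        if table[i] and target[i : i + len(word)] == word:
            table[i + len(word)] += [way + [word] for way in table[i]]
print(table[len(target)])  # [['ab'], ['a', 'b']]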
| 314
| 1
|
import unittest
from knapsack import knapsack as k
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = [0]
SCREAMING_SNAKE_CASE__ = [0]
SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase )
self.assertEqual(k.knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , 0 )
SCREAMING_SNAKE_CASE__ = [60]
SCREAMING_SNAKE_CASE__ = [10]
SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase )
self.assertEqual(k.knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , 0 )
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [1, 2, 3]
SCREAMING_SNAKE_CASE__ = [3, 2, 1]
SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase )
self.assertEqual(k.knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , 5 )
def lowercase_ ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ = 50
SCREAMING_SNAKE_CASE__ = [60, 100, 120]
SCREAMING_SNAKE_CASE__ = [10, 20, 30]
SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase )
self.assertEqual(k.knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , 220 )
if __name__ == "__main__":
unittest.main()
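# The 220 expected in the last test is the classic 0/1 knapsack optimum
# (the items of value 100 and 120 exactly fill the capacity of 50). A
# minimal standalone DP using the recurrence dp[c] = max(dp[c], dp[c-w] + v),
# iterating capacity downwards so each item is used at most once (sketch,
# independent of the imported knapsack module):
def knapsack_dp(cap, weights, values):
    dp = [0] * (cap + 1)
    for w, v in zip(weights, values):
        for c in range(cap, w - 1, -1):
            dp[c] = max(dp[c], dp[c - w] + v)
    return dp[cap]

assert knapsack_dp(50, [10, 20, 30], [60, 100, 120]) == 220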
| 314
|
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase_ ( _A = "AAPL" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(_A ).text , '''html.parser''' )
SCREAMING_SNAKE_CASE__ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 1
|
def UpperCAmelCase_ ( _A = 50_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = int((limit - 24) ** (1 / 2) )
SCREAMING_SNAKE_CASE__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , _A ) ) )
for primea in primes:
SCREAMING_SNAKE_CASE__ = primea * primea
for primea in primes:
SCREAMING_SNAKE_CASE__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
SCREAMING_SNAKE_CASE__ = primea * primea * primea * primea
SCREAMING_SNAKE_CASE__ = square + cube + tetr
if total >= limit:
break
ret.add(_A )
return len(_A )
if __name__ == "__main__":
print(F"{solution() = }")
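# Cross-check of the prime square + cube + fourth-power counting against the
# small limit from the original Project Euler 87 statement: below fifty there
# are exactly four such numbers.
from itertools import product

small_primes = [2, 3, 5]  # sufficient for a limit of 50
triples = {
    p**2 + q**3 + r**4
    for p, q, r in product(small_primes, repeat=3)
    if p**2 + q**3 + r**4 < 50
}
print(sorted(triples))  # [28, 33, 47, 49]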
| 314
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
assert abs(scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 ) < 1e-5
assert abs(scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 ) < 1e-5
assert abs(scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 ) < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
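# Minimal standalone use of the scheduler exercised by the tests above, with
# the same config values as the helper (a sketch; exact API details may vary
# across diffusers versions):
from diffusers import UnCLIPScheduler

sched = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sched.set_timesteps(25)
print(sched.timesteps[:5])  # first five inference timesteps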
| 314
| 1
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = PhobertTokenizer
a = False
def lowercase_ ( self : Optional[int] ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = ['''#version: 0.2''', '''l à</w>''']
SCREAMING_SNAKE_CASE__ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def lowercase_ ( self : Any , **__lowerCamelCase : Optional[Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE__ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ = '''Tôi là VinAI Research'''
SCREAMING_SNAKE_CASE__ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
print(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
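# The released checkpoint behind this tokenizer lives on the Hub (sketch;
# requires network access):
from transformers import AutoTokenizer

phobert_tok = AutoTokenizer.from_pretrained("vinai/phobert-base")
print(phobert_tok.tokenize("Tôi là sinh_viên"))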
| 314
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ( ):
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE__ = nn.BatchNorm1d(4 )
SCREAMING_SNAKE_CASE__ = nn.Linear(4 , 5 )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mock_training_loop_function('''hello''' )
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__lowerCamelCase : Optional[Any] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')`''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
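# How find_executable_batch_size is meant to be used outside the tests:
# decorate the training entry point, and the decorator retries with a halved
# batch size whenever an out-of-memory error escapes (sketch; the training
# body is elided):
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def training_run(batch_size):
    print(f"training with batch_size={batch_size}")
    # build dataloaders with `batch_size` and run the training loop here

training_run()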
| 314
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE : Any = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
_SCREAMING_SNAKE_CASE : str = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_INIT_CONFIGURATION
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = BertTokenizer
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=True , __lowerCamelCase : Union[str, Any]="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : List[Any]="[PAD]" , __lowerCamelCase : int="[CLS]" , __lowerCamelCase : Tuple="[MASK]" , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Any=None , **__lowerCamelCase : List[Any] , ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowerCamelCase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ = normalizer_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def lowercase_ ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : str=None ) -> List[str]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
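# Everyday use of the fast tokenizer defined above (downloads the checkpoint
# on first call):
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
print(tok("hello world")["input_ids"])  # [101, 7592, 2088, 102]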
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
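# Outside the unit tests, the processor is normally loaded from the released
# ALIGN checkpoint and pairs text and images in a single call (sketch;
# requires network access):
from transformers import AlignProcessor

align_processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
text_inputs = align_processor(text="a photo of a cat")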
| 314
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_SCREAMING_SNAKE_CASE : List[Any] = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_SCREAMING_SNAKE_CASE : str = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_SCREAMING_SNAKE_CASE : Dict = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def lowercase_ ( self : str ) -> str:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int = CHRF.CHAR_ORDER , __lowerCamelCase : int = CHRF.WORD_ORDER , __lowerCamelCase : int = CHRF.BETA , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , ) -> Any:
SCREAMING_SNAKE_CASE__ = len(references[0] )
if any(len(__lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
SCREAMING_SNAKE_CASE__ = [[refs[i] for refs in references] for i in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE__ = CHRF(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sb_chrf.corpus_score(__lowerCamelCase , __lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 314
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
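# The split/reverse/join idiom standalone; note that str.split() with no
# arguments also collapses runs of whitespace:
print(" ".join("hello   big world".split()[::-1]))  # -> world big hello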
| 314
| 1
|
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def lowercase_ ( self : Tuple , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Dict ) -> Any:
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
SCREAMING_SNAKE_CASE__ = truncation
SCREAMING_SNAKE_CASE__ = tokenize_kwargs
SCREAMING_SNAKE_CASE__ = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = return_tensors
return preprocess_params, {}, postprocess_params
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE__ = self.framework
SCREAMING_SNAKE_CASE__ = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
return model_inputs
def lowercase_ ( self : Dict , __lowerCamelCase : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.model(**__lowerCamelCase )
return model_outputs
def lowercase_ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple=False ) -> Optional[Any]:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ) -> List[str]:
return super().__call__(*__lowerCamelCase , **__lowerCamelCase )
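# The class above backs the "feature-extraction" pipeline task; typical use
# (downloads a small model on first run):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("hello world")
print(len(features[0]), len(features[0][0]))  # tokens x hidden_size (768)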
| 314
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A , _A ) # noqa: S310
with gfile.GFile(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.size()
print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
return filepath
@deprecated(
_A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
return _Datasets(train=_A , validation=_A , test=_A )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
if not 0 <= validation_size <= len(_A ):
SCREAMING_SNAKE_CASE__ = (
'''Validation size should be between 0 and '''
F'''{len(_A )}. Received: {validation_size}.'''
)
raise ValueError(_A )
SCREAMING_SNAKE_CASE__ = train_images[:validation_size]
SCREAMING_SNAKE_CASE__ = train_labels[:validation_size]
SCREAMING_SNAKE_CASE__ = train_images[validation_size:]
SCREAMING_SNAKE_CASE__ = train_labels[validation_size:]
SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
return _Datasets(train=_A , validation=_A , test=_A )
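# The dense -> one-hot label conversion used above, rendered as a standalone
# numpy snippet for clarity:
import numpy as np

labels = np.array([0, 2, 1])
one_hot = np.zeros((labels.size, 3))
one_hot[np.arange(labels.size), labels] = 1
print(one_hot)  # rows: [1,0,0], [0,0,1], [0,1,0]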
| 314
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
def UpperCAmelCase_ ( _A=2 , _A=3 , _A=16 , _A = 10 , _A = 2 ):
'''simple docstring'''
def get_dataset(_A ):
SCREAMING_SNAKE_CASE__ = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_A , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE__ = get_dataset(_A )
SCREAMING_SNAKE_CASE__ = get_dataset(_A )
SCREAMING_SNAKE_CASE__ = DataLoader(_A , shuffle=_A , batch_size=_A , num_workers=4 )
SCREAMING_SNAKE_CASE__ = DataLoader(_A , shuffle=_A , batch_size=_A , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCAmelCase_ ( _A , _A , _A , _A , _A , _A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for epoch in range(_A ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = batch
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = torch.nn.functional.mse_loss(_A , _A )
accelerator.backward(_A )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.randn(1 ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Dict ) -> Optional[Any]:
return x * self.a + self.b
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(total_limit=1 , project_dir=__lowerCamelCase , automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowercase_ ( self : Optional[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save initial
SCREAMING_SNAKE_CASE__ = os.path.join(__lowerCamelCase , '''initial''' )
accelerator.save_state(__lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
SCREAMING_SNAKE_CASE__ = train(3 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
accelerator.load_state(__lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = train(2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save everything
SCREAMING_SNAKE_CASE__ = os.path.join(__lowerCamelCase , '''checkpoint''' )
accelerator.save_state(__lowerCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowerCamelCase )
test_rands += train(1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=__lowerCamelCase , project_config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
SCREAMING_SNAKE_CASE__ = train(3 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=__lowerCamelCase , project_config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
accelerator.load_state(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = train(2 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
((SCREAMING_SNAKE_CASE__),(SCREAMING_SNAKE_CASE__)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE__ = optimizer.state_dict()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE__ = Accelerator()
with self.assertRaises(__lowerCamelCase ) as ve:
accelerator.register_for_checkpointing(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def lowercase_ ( self : str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__ = torch.optim.lr_scheduler.StepLR(__lowerCamelCase , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase )
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=__lowerCamelCase , project_config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE__ = scheduler.state_dict()
train(3 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertNotEqual(__lowerCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(__lowerCamelCase , scheduler.state_dict() )
def lowercase_ ( self : Dict ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=__lowerCamelCase , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=__lowerCamelCase , project_config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(__lowerCamelCase )
# Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = '''/tmp/accelerate/state_checkpointing'''
_SCREAMING_SNAKE_CASE : str = DummyModel()
_SCREAMING_SNAKE_CASE : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_SCREAMING_SNAKE_CASE : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = dummy_dataloaders()
_SCREAMING_SNAKE_CASE : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_SCREAMING_SNAKE_CASE : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_SCREAMING_SNAKE_CASE : int = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
_SCREAMING_SNAKE_CASE : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
_SCREAMING_SNAKE_CASE : Tuple = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
_SCREAMING_SNAKE_CASE : Optional[Any] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
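# --- Editor's sketch (hypothetical, illustrative names and paths) ---
# The minimal save/load round trip that the script above exercises, in
# isolation; `tmpdir` stands for any writable directory.
#
#     config = ProjectConfiguration(project_dir=tmpdir, automatic_checkpoint_naming=True)
#     accelerator = Accelerator(project_config=config)
#     model = DummyModel()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()  # writes <tmpdir>/checkpoints/checkpoint_0
#     accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))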
| 314
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return max(metric_fn(_A , _A ) for gt in ground_truths )
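# Editor's note (hypothetical example, using the conventional name
# `metric_max_over_ground_truths` referenced later in this file): the helper
# above scores a prediction against every reference answer and keeps the
# best score, e.g. with a toy exact-match metric:
#
#     em = lambda pred, gt: float(pred == gt)
#     metric_max_over_ground_truths(em, "paris", ["Paris", "paris"])  # -> 1.0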
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
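# --- Editor's sketch (hypothetical invocation) ---
# An illustrative end-to-end run; the script file name and all paths are
# placeholders, and the model id is one public RAG checkpoint.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set questions.txt \
#       --gold_data_path gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5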
| 314
| 1
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : NestedDataStructureLike[PathLike] , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ) -> Union[str, Any]:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def lowercase_ ( self : Optional[int] ) -> Any:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
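# --- Editor's sketch (hypothetical usage) ---
# How such a reader is typically driven, assuming the conventional
# `TextDatasetReader` name for the class above and a `read()` entry point
# wrapping the method shown; the path is illustrative.
#
#     reader = TextDatasetReader("corpus.txt", split="train")
#     ds = reader.read()
#     ds[0]  # {'text': '<first line of corpus.txt>'}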
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
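# Editor's worked example (hypothetical numbers) of the shortest-edge rule
# implemented above, with size["shortest_edge"] = 18 and an input of
# width 30, height 60 (w < h):
#
#     expected_width  = 18                 # the shortest edge
#     expected_height = int(18 * 60 / 30)  # = 36, preserving the aspect ratio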
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize two image_processing instances
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
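# --- Editor's sketch (illustrative standalone usage) ---
# Mirrors the slow tests above; the checkpoint id and fixture path are the
# ones those tests use.
#
#     image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     pixel_values = image_processing(images=image, return_tensors="pt").pixel_values
#     pixel_values.shape  # torch.Size([1, 3, 800, 1066]) for this fixture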
| 314
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Dict=7 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Union[str, Any]=18 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Optional[int]=400 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=[0.48145466, 0.4578275, 0.40821073] , __lowerCamelCase : List[str]=[0.26862954, 0.26130258, 0.27577711] , __lowerCamelCase : Optional[int]=True , ) -> str:
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_convert_rgb
def lowercase_ ( self : Optional[Any] ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : List[str]=False , __lowerCamelCase : str=False , __lowerCamelCase : Any=False ) -> Dict:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
SCREAMING_SNAKE_CASE__ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
SCREAMING_SNAKE_CASE__ = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
SCREAMING_SNAKE_CASE__ = [torch.from_numpy(__lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = ChineseCLIPImageProcessingTester(self , do_center_crop=__lowerCamelCase )
@property
def lowercase_ ( self : Optional[int] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowercase_ ( self : str ) -> Dict:
pass
def lowercase_ ( self : int ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : List[Any] ) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 3
@property
def lowercase_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def lowercase_ ( self : str ) -> List[str]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
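# --- Editor's sketch (hypothetical usage) ---
# The four-channel tests above rely on do_convert_rgb: RGBA inputs are
# converted to three-channel RGB before any resizing or normalization.
#
#     image_processing = ChineseCLIPImageProcessor(do_convert_rgb=True)
#     rgba = Image.new("RGBA", (224, 224))
#     out = image_processing(images=rgba, return_tensors="pt").pixel_values
#     out.shape[1]  # 3, regardless of the 4 input channels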
| 314
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
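# --- Editor's note (illustrative) ---
# For the default task, the `inputs` property above resolves to:
#
#     OrderedDict([('input_ids',      {0: 'batch', 1: 'sequence'}),
#                  ('attention_mask', {0: 'batch', 1: 'sequence'})])
#
# which the ONNX exporter uses as the dynamic axes of the traced model.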
| 314
| 1
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''T5Config'''
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = jnp.zeros_like(_A )
SCREAMING_SNAKE_CASE__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
SCREAMING_SNAKE_CASE__ = shifted_input_ids.at[:, 0].set(_A )
SCREAMING_SNAKE_CASE__ = jnp.where(shifted_input_ids == -1_00 , _A , _A )
return shifted_input_ids
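# Editor's worked example (hypothetical values, using the conventional
# argument names) of the right shift above:
#
#     shift_tokens_right(jnp.array([[5, 6, -100]]), pad_token_id=0,
#                        decoder_start_token_id=1)
#     # -> [[1, 5, 6]]: start token prepended, inputs shifted right, and the
#     #    -100 label sentinel replaced by pad_token_id.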
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "mt5"
a = MTaConfig
| 314
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
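# --- Editor's sketch (illustrative, assuming the conventional attribute
# name `num_hidden_layers` for the per-encoder layer dict built above) ---
#
#     config = LxmertConfig()
#     config.num_hidden_layers  # {'vision': 5, 'cross_encoder': 5, 'language': 9}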
| 314
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = 42
a = 42
def __init__( self : List[str] , __lowerCamelCase : UNetaDModel , __lowerCamelCase : KarrasVeScheduler ) -> Tuple:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : Dict , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , **__lowerCamelCase : Dict , ) -> Union[Tuple, ImagePipelineOutput]:
SCREAMING_SNAKE_CASE__ = self.unet.config.sample_size
SCREAMING_SNAKE_CASE__ = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE__ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
SCREAMING_SNAKE_CASE__ = self.scheduler.schedule[t]
SCREAMING_SNAKE_CASE__ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.scheduler.add_noise_to_input(__lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
SCREAMING_SNAKE_CASE__ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
SCREAMING_SNAKE_CASE__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
SCREAMING_SNAKE_CASE__ = self.scheduler.step_correct(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , step_output.prev_sample , step_output['''derivative'''] , )
SCREAMING_SNAKE_CASE__ = step_output.prev_sample
SCREAMING_SNAKE_CASE__ = (sample / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
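# --- Editor's sketch (hypothetical usage) ---
# Driving the pipeline above end to end; the checkpoint id is one public
# Karras-VE-compatible model and is illustrative only.
#
#     pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("karras_ve_sample.png")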
| 314
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
def lowercase_ ( self : int , __lowerCamelCase : Tuple ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
                with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {__lowerCamelCase}, please rebuild the dataset''' )
            return
        SCREAMING_SNAKE_CASE__ = __lowerCamelCase.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE__ = lineTmp.strip()
SCREAMING_SNAKE_CASE__ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
SCREAMING_SNAKE_CASE__ = line[:idx]
SCREAMING_SNAKE_CASE__ = len(self.encoder )
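# A self-contained toy illustration (not from the file above) of the BPE loop implemented in
# `bpe`: repeatedly merge the adjacent pair with the best (lowest) rank until no ranked pair
# remains. The two merge rules below are made up for the example.
def toy_bpe(word, ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        pairs = {(a, b) for a, b in zip(symbols, symbols[1:])}
        ranked = [p for p in pairs if p in ranks]
        if not ranked:
            break
        first, second = min(ranked, key=ranks.get)
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return "@@ ".join(symbols)

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # low@@ e@@ r</w>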
| 314
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_SCREAMING_SNAKE_CASE : Union[str, Any] = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, _SCREAMING_SNAKE_CASE)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_A )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
    SCREAMING_SNAKE_CASE__ = _A.config.getoption('''--make-reports''' )
    if SCREAMING_SNAKE_CASE__:
        pytest_terminal_summary_main(_A , id=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( _A , exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        _A.exitstatus = 0
# Doctest custom flag to ignore output.
_SCREAMING_SNAKE_CASE : List[Any] = doctest.register_optionflag('''IGNORE_RESULT''')
_SCREAMING_SNAKE_CASE : int = doctest.OutputChecker
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = CustomOutputChecker
_SCREAMING_SNAKE_CASE : int = HfDoctestModule
_SCREAMING_SNAKE_CASE : int = HfDocTestParser
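# A standalone sketch (separate from the conftest above) of how a custom doctest option flag
# like IGNORE_RESULT is meant to be consumed: the checker accepts any output when the flag is
# set on an example. All names below are local to the demo.
import doctest as _doctest_demo

_DEMO_IGNORE = _doctest_demo.register_optionflag("DEMO_IGNORE")

class _IgnoringChecker(_doctest_demo.OutputChecker):
    def check_output(self, want, got, optionflags):
        if _DEMO_IGNORE & optionflags:
            return True  # accept whatever the example printed
        return _doctest_demo.OutputChecker.check_output(self, want, got, optionflags)

_demo_src = """
>>> 1 + 1  # doctest: +DEMO_IGNORE
3
"""
_demo_test = _doctest_demo.DocTestParser().get_doctest(_demo_src, {}, "demo", None, 0)
_doctest_demo.DocTestRunner(checker=_IgnoringChecker()).run(_demo_test)  # passes despite the wrong expected output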
| 314
|
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase_ ( _A = _SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , _A[i : i + 13] ) )
        for i in range(len(_A ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 1
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_SCREAMING_SNAKE_CASE : int = {'''UserAgent''': UserAgent().random}
def UpperCAmelCase_ ( _A ):
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ = _A.contents[0]
    SCREAMING_SNAKE_CASE__ = json.loads(SCREAMING_SNAKE_CASE__[SCREAMING_SNAKE_CASE__.find('''{"config"''' ) : -1] )
    return SCREAMING_SNAKE_CASE__["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : int ) -> str:
        SCREAMING_SNAKE_CASE__ = f'''https://www.instagram.com/{__lowerCamelCase}/'''
SCREAMING_SNAKE_CASE__ = self.get_json()
def lowercase_ ( self : int ) -> dict:
SCREAMING_SNAKE_CASE__ = requests.get(self.url , headers=__lowerCamelCase ).text
SCREAMING_SNAKE_CASE__ = BeautifulSoup(__lowerCamelCase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ) -> str:
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : Dict ) -> str:
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowercase_ ( self : Optional[int] ) -> str:
return self.user_data["username"]
@property
def lowercase_ ( self : Any ) -> str:
return self.user_data["full_name"]
@property
def lowercase_ ( self : Tuple ) -> str:
return self.user_data["biography"]
@property
def lowercase_ ( self : Any ) -> str:
return self.user_data["business_email"]
@property
def lowercase_ ( self : Any ) -> str:
return self.user_data["external_url"]
@property
def lowercase_ ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase_ ( self : Union[str, Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def lowercase_ ( self : int ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase_ ( self : List[Any] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def lowercase_ ( self : str ) -> bool:
return self.user_data["is_verified"]
@property
def lowercase_ ( self : Dict ) -> bool:
return self.user_data["is_private"]
def UpperCAmelCase_ ( _A = "github" ):
'''simple docstring'''
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE__ = InstagramUser(_A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE : Dict = InstagramUser('''github''')
print(instagram_user)
print(F"{instagram_user.number_of_posts = }")
print(F"{instagram_user.number_of_followers = }")
print(F"{instagram_user.number_of_followings = }")
print(F"{instagram_user.email = }")
print(F"{instagram_user.website = }")
print(F"{instagram_user.profile_picture_url = }")
print(F"{instagram_user.is_verified = }")
print(F"{instagram_user.is_private = }")
| 314
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
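# A small numeric sketch (not from the file above) of what `eta` controls in the DDIM step:
# the per-step standard deviation is
#   sigma_t = eta * sqrt((1 - a_prev) / (1 - a_t)) * sqrt(1 - a_t / a_prev)
# with a_* the cumulative alpha products, so eta=0.0 is deterministic DDIM and eta=1.0
# recovers DDPM-like variance.
import math

def ddim_sigma(alpha_prod_t, alpha_prod_prev, eta):
    return eta * math.sqrt((1 - alpha_prod_prev) / (1 - alpha_prod_t)) * math.sqrt(1 - alpha_prod_t / alpha_prod_prev)

print(ddim_sigma(0.5, 0.8, eta=0.0))  # 0.0 -> fully deterministic
print(ddim_sigma(0.5, 0.8, eta=1.0))  # ~0.39 -> stochastic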
| 314
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE : Any = '''src/transformers'''
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE : int = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE : str = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE : str = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE : Dict = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE : Any = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'''^\s*try:''')
# Catches a line with else:
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r'''^\s*else:''')
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if _re_test_backend.search(_A ) is None:
return None
SCREAMING_SNAKE_CASE__ = [b[0] for b in _re_backend.findall(_A )]
backends.sort()
return "_and_".join(_A )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
with open(_A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
SCREAMING_SNAKE_CASE__ = 0
while line_index < len(_A ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_A ):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE__ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
SCREAMING_SNAKE_CASE__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_A ):
SCREAMING_SNAKE_CASE__ = _re_one_line_import_struct.search(_A ).groups()[0]
            SCREAMING_SNAKE_CASE__ = re.findall(r'''\[([^\]]+)\]''' , _A )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
SCREAMING_SNAKE_CASE__ = _re_import_struct_key_value.search(_A )
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_A ) > 0]
objects.extend(_A )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
if _re_import_struct_add_one.search(_A ) is not None:
objects.append(_re_import_struct_add_one.search(_A ).groups()[0] )
elif _re_import_struct_add_many.search(_A ) is not None:
SCREAMING_SNAKE_CASE__ = _re_import_struct_add_many.search(_A ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(_A ) > 0]
objects.extend(_A )
elif _re_between_brackets.search(_A ) is not None:
SCREAMING_SNAKE_CASE__ = _re_between_brackets.search(_A ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(_A ) > 0]
objects.extend(_A )
elif _re_quote_object.search(_A ) is not None:
objects.append(_re_quote_object.search(_A ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE__ = []
while (
line_index < len(_A )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_A ):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase_ ( import_dict_objects , type_hint_objects ):
    '''simple docstring'''
    def find_duplicates(_A ):
        return [k for k, v in collections.Counter(_A ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
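# A toy invocation of the comparison above (hypothetical module contents): "FooModel" is
# exported under the torch backend in _import_structure but missing from TYPE_CHECKING,
# which is exactly the mismatch this check reports.
_demo_imports = {"none": ["FooConfig"], "torch": ["FooModel"]}
_demo_hints = {"none": ["FooConfig"], "torch": []}
for _err in UpperCAmelCase_(_demo_imports , _demo_hints ):
    print(_err)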
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE__ = os.path.join(_A , '''__init__.py''' )
SCREAMING_SNAKE_CASE__ = parse_init(_A )
if objects is not None:
SCREAMING_SNAKE_CASE__ = analyze_results(*_A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_A ) )
if len(_A ) > 0:
raise ValueError('''\n\n'''.join(_A ) )
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for path, directories, files in os.walk(_A ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_A )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_A ) / folder).glob('''*.py''' ) ) ) == 0:
continue
SCREAMING_SNAKE_CASE__ = str((Path(_A ) / folder).relative_to(_A ) )
SCREAMING_SNAKE_CASE__ = short_path.replace(os.path.sep , '''.''' )
submodules.append(_A )
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE__ = str((Path(_A ) / fname).relative_to(_A ) )
SCREAMING_SNAKE_CASE__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_A )
return submodules
_SCREAMING_SNAKE_CASE : Tuple = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(_A , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
SCREAMING_SNAKE_CASE__ = spec.loader.load_module()
SCREAMING_SNAKE_CASE__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 314
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
def __init__( self : int , __lowerCamelCase : Optional[Any]=3_0522 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : int=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Union[str, Any]=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=1024 , __lowerCamelCase : Union[str, Any]=[3, 256, 256, 2, 256, 256, 10] , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[Any]=10.0 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : str=1.0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=1.0 , __lowerCamelCase : Dict=1.0 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , __lowerCamelCase : List[str]="ratio" , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[Any]=64 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str , ) -> str:
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_sizes
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE__ = positive_label_weight
SCREAMING_SNAKE_CASE__ = num_aggregation_labels
SCREAMING_SNAKE_CASE__ = aggregation_loss_weight
SCREAMING_SNAKE_CASE__ = use_answer_as_supervision
SCREAMING_SNAKE_CASE__ = answer_loss_importance
SCREAMING_SNAKE_CASE__ = use_normalized_answer_loss
SCREAMING_SNAKE_CASE__ = huber_loss_delta
SCREAMING_SNAKE_CASE__ = temperature
SCREAMING_SNAKE_CASE__ = aggregation_temperature
SCREAMING_SNAKE_CASE__ = use_gumbel_for_cells
SCREAMING_SNAKE_CASE__ = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE__ = average_approximation_function
SCREAMING_SNAKE_CASE__ = cell_selection_preference
SCREAMING_SNAKE_CASE__ = answer_loss_cutoff
SCREAMING_SNAKE_CASE__ = max_num_rows
SCREAMING_SNAKE_CASE__ = max_num_columns
SCREAMING_SNAKE_CASE__ = average_logits_per_cell
SCREAMING_SNAKE_CASE__ = select_one_column
SCREAMING_SNAKE_CASE__ = allow_empty_column_selection
SCREAMING_SNAKE_CASE__ = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE__ = reset_position_index_per_cell
SCREAMING_SNAKE_CASE__ = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE__ = aggregation_labels
SCREAMING_SNAKE_CASE__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __lowerCamelCase ):
            SCREAMING_SNAKE_CASE__ = {int(k ): v for k, v in aggregation_labels.items()}
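# A quick illustration of the key coercion above: after a JSON round-trip (e.g. loading a
# saved config), dict keys arrive as strings and must be mapped back to int label ids.
_demo_labels = {"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"}
assert {int(k): v for k, v in _demo_labels.items()}[1] == "SUM"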
| 314
| 1
|
from ... import PretrainedConfig
_SCREAMING_SNAKE_CASE : Dict = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
| 314
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
# Check that tokenizer_type ≠ model_type
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
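# A condensed, self-contained sketch of the registration pattern exercised above, using
# throwaway config/tokenizer classes (names are made up for the example). Requires
# `transformers` to be installed; the cleanup mirrors the tests' finally-blocks.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig
from transformers.models.auto.configuration_auto import CONFIG_MAPPING
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING

class DemoConfig(PretrainedConfig):
    model_type = "demo-model"

class DemoTokenizer(BertTokenizer):
    pass

try:
    AutoConfig.register("demo-model", DemoConfig)
    AutoTokenizer.register(DemoConfig, slow_tokenizer_class=DemoTokenizer)
    # the mapping now resolves the custom config to its (slow, fast) tokenizer pair
    assert TOKENIZER_MAPPING[DemoConfig] == (DemoTokenizer, None)
finally:
    CONFIG_MAPPING._extra_content.pop("demo-model", None)
    TOKENIZER_MAPPING._extra_content.pop(DemoConfig, None)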
| 314
| 1
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_SCREAMING_SNAKE_CASE : Optional[int] = '''Usage of script: script_name <size_of_canvas:int>'''
_SCREAMING_SNAKE_CASE : Optional[Any] = [0] * 100 + [1] * 10
random.shuffle(_SCREAMING_SNAKE_CASE)
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [[False for i in range(_A )] for j in range(_A )]
return canvas
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
for i, row in enumerate(_A ):
for j, _ in enumerate(_A ):
SCREAMING_SNAKE_CASE__ = bool(random.getrandbits(1 ) )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
SCREAMING_SNAKE_CASE__ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_A ):
for c, pt in enumerate(_A ):
SCREAMING_SNAKE_CASE__ = __judge_point(
_A , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
SCREAMING_SNAKE_CASE__ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
SCREAMING_SNAKE_CASE__ = current_canvas.tolist()
return return_canvas
def UpperCAmelCase_ ( pt , neighbours ):
    '''simple docstring'''
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
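# Two spot checks (not part of the module) of the rule table implemented above: a live
# cell with exactly two live neighbours survives, and a dead cell with exactly three
# live neighbours becomes alive.
assert UpperCAmelCase_(True , [[True, True, False], [False, True, False], [False, False, False]] )
assert UpperCAmelCase_(False , [[True, True, False], [False, False, True], [False, False, False]] )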
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_SCREAMING_SNAKE_CASE : Dict = int(sys.argv[1])
# main working structure of this module.
_SCREAMING_SNAKE_CASE : Optional[Any] = create_canvas(canvas_size)
seed(c)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = plt.subplots()
fig.show()
_SCREAMING_SNAKE_CASE : Optional[int] = ListedColormap(['''w''', '''k'''])
try:
while True:
_SCREAMING_SNAKE_CASE : Dict = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def UpperCAmelCase_ ( _A , _A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
SCREAMING_SNAKE_CASE__ = '''segformer.encoder.''' + key
if key.startswith('''backbone''' ):
SCREAMING_SNAKE_CASE__ = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
SCREAMING_SNAKE_CASE__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
SCREAMING_SNAKE_CASE__ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_A )-1}''' )
if "norm" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
SCREAMING_SNAKE_CASE__ = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
SCREAMING_SNAKE_CASE__ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_A )-1}''' )
if "layer_norm1" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
SCREAMING_SNAKE_CASE__ = key[key.find('''block''' ) + len('''block''' )]
SCREAMING_SNAKE_CASE__ = key.replace(F'''block{idx}''' , F'''block.{int(_A )-1}''' )
if "attn.q" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
SCREAMING_SNAKE_CASE__ = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
SCREAMING_SNAKE_CASE__ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
SCREAMING_SNAKE_CASE__ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_A )-1}''' )
if key.startswith('''head''' ):
SCREAMING_SNAKE_CASE__ = key.replace('''head''' , '''classifier''' )
SCREAMING_SNAKE_CASE__ = value
return new_state_dict
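# Illustrative examples of the renaming rules above (hypothetical keys, shown for clarity):
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"
#   "backbone.block1.0.attn.q.weight" -> "segformer.encoder.block.0.0.attention.self.query.weight"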
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = kv_weight[
: config.hidden_sizes[i], :
]
SCREAMING_SNAKE_CASE__ = kv_bias[: config.hidden_sizes[i]]
SCREAMING_SNAKE_CASE__ = kv_weight[
config.hidden_sizes[i] :, :
]
SCREAMING_SNAKE_CASE__ = kv_bias[
config.hidden_sizes[i] :
]
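# Note: the fused "kv" projection has shape (2 * hidden_size, hidden_size); the slices
# above assign its first hidden_size rows (and bias entries) to the key projection and
# the remaining rows to the value projection.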
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
return image
@torch.no_grad()
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = SegformerConfig()
SCREAMING_SNAKE_CASE__ = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
if "segformer" in model_name:
SCREAMING_SNAKE_CASE__ = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE__ = 1_50
SCREAMING_SNAKE_CASE__ = '''ade20k-id2label.json'''
SCREAMING_SNAKE_CASE__ = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
SCREAMING_SNAKE_CASE__ = 19
SCREAMING_SNAKE_CASE__ = '''cityscapes-id2label.json'''
SCREAMING_SNAKE_CASE__ = (1, 19, 1_28, 1_28)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_name[4:6]
SCREAMING_SNAKE_CASE__ = 10_00
SCREAMING_SNAKE_CASE__ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ = (1, 10_00)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(_A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 2_56
elif size == "b2":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_A , align=_A , do_random_crop=_A )
# prepare image
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=_A , return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location=torch.device('''cpu''' ) )
else:
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
SCREAMING_SNAKE_CASE__ = rename_keys(_A , encoder_only=_A )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_A , _A )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = SegformerForImageClassification(_A )
else:
SCREAMING_SNAKE_CASE__ = SegformerForSemanticSegmentation(_A )
model.load_state_dict(_A )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
SCREAMING_SNAKE_CASE__ = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 314
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Dict = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
| 314
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCAmelCase_ ( _A , _A , _A = 10**-10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = a
while True:
SCREAMING_SNAKE_CASE__ = Decimal(_A ) - (
Decimal(eval(_A ) ) / Decimal(eval(str(diff(_A ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_A ) ) < precision: # noqa: S307
return float(_A )
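# The loop above is the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n), with
# the derivative f' obtained symbolically via sympy's diff, iterated until
# |f(x_n)| < precision.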
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find value of e (the root of log(x) - 1 = 0)
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 314
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __getstate__( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = self.__dict__['''_tokenizer'''].get_vocab()
SCREAMING_SNAKE_CASE__ = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) )
    def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> List[Any]:
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def lowercase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
        SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
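    # Illustrative output for a sequence pair (assuming BERT-style special tokens):
    #   tokens:   [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
    #   type ids:   0   0  ...  0   0    1  ...  1   1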
def lowercase_ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , **__lowerCamelCase : Tuple , ) -> int:
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
| 314
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Tuple = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 314
| 1
|
import argparse
_SCREAMING_SNAKE_CASE : Optional[int] = '''docs/source/_static/js/custom.js'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
with open(_A , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
SCREAMING_SNAKE_CASE__ = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
SCREAMING_SNAKE_CASE__ = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(_A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_A )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 314
|
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
SCREAMING_SNAKE_CASE__ = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
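# The sieve above removes composites, and the inner loop applies Euler's product
# formula phi(n) = n * prod over primes p dividing n of (1 - 1/p). Since phi(d)
# counts the reduced proper fractions with denominator d, the sum gives the total
# for all denominators from 2 up to the limit.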
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "encodec"
def __init__( self : List[str] , __lowerCamelCase : Optional[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , __lowerCamelCase : Dict=2_4000 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : str=128 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : int=1 , __lowerCamelCase : Dict=[8, 5, 4, 2] , __lowerCamelCase : int="weight_norm" , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]="reflect" , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple=1.0 , __lowerCamelCase : List[Any]=1024 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=True , **__lowerCamelCase : Dict , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = target_bandwidths
SCREAMING_SNAKE_CASE__ = sampling_rate
SCREAMING_SNAKE_CASE__ = audio_channels
SCREAMING_SNAKE_CASE__ = normalize
SCREAMING_SNAKE_CASE__ = chunk_length_s
SCREAMING_SNAKE_CASE__ = overlap
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_filters
SCREAMING_SNAKE_CASE__ = num_residual_layers
SCREAMING_SNAKE_CASE__ = upsampling_ratios
SCREAMING_SNAKE_CASE__ = norm_type
SCREAMING_SNAKE_CASE__ = kernel_size
SCREAMING_SNAKE_CASE__ = last_kernel_size
SCREAMING_SNAKE_CASE__ = residual_kernel_size
SCREAMING_SNAKE_CASE__ = dilation_growth_rate
SCREAMING_SNAKE_CASE__ = use_causal_conv
SCREAMING_SNAKE_CASE__ = pad_mode
SCREAMING_SNAKE_CASE__ = compress
SCREAMING_SNAKE_CASE__ = num_lstm_layers
SCREAMING_SNAKE_CASE__ = trim_right_ratio
SCREAMING_SNAKE_CASE__ = codebook_size
SCREAMING_SNAKE_CASE__ = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE__ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
            f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**__lowerCamelCase )
@property
def lowercase_ ( self : int ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowercase_ ( self : int ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowercase_ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowercase_ ( self : Dict ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
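# Worked example with the 24 kHz defaults above: upsampling_ratios [8, 5, 4, 2] give a
# hop length of 8 * 5 * 4 * 2 = 320 samples, so the frame rate is
# ceil(24000 / 320) = 75 frames per second.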
| 314
|
import numpy as np
from PIL import Image
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
SCREAMING_SNAKE_CASE__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
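# Illustrative run of the max pooling above (size=2, stride=2): a 4x4 input yields a
# (4 - 2) // 2 + 1 = 2 x 2 output, each cell the maximum of its 2x2 window, e.g.
# [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] -> [[6, 8], [14, 16]]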
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
SCREAMING_SNAKE_CASE__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_SCREAMING_SNAKE_CASE : Dict = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE : str = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_SCREAMING_SNAKE_CASE : List[str] = spec.loader.load_module()
_SCREAMING_SNAKE_CASE : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_SCREAMING_SNAKE_CASE : Any = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
_SCREAMING_SNAKE_CASE : Any = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for config_class in list(CONFIG_MAPPING.values() ):
SCREAMING_SNAKE_CASE__ = False
# source code of `config_class`
SCREAMING_SNAKE_CASE__ = inspect.getsource(_A )
SCREAMING_SNAKE_CASE__ = _re_checkpoint.findall(_A )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE__ = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE__ = True
break
SCREAMING_SNAKE_CASE__ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = '''\n'''.join(sorted(_A ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 314
|
from __future__ import annotations
def UpperCAmelCase_ ( _A , _A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = word_bank or []
# create a table
SCREAMING_SNAKE_CASE__ = len(_A ) + 1
SCREAMING_SNAKE_CASE__ = []
for _ in range(_A ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_A ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_A )] == word:
SCREAMING_SNAKE_CASE__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_A )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_A )]:
combination.reverse()
return table[len(_A )]
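# Note: table[i] stores every way to build target[:i] from the word bank, so the final
# entry enumerates all segmentations of the full target; the number of combinations
# can grow exponentially with the target length.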
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 314
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = BlipImageProcessor()
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE__ = BlipaProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : List[Any] , **__lowerCamelCase : Any ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def lowercase_ ( self : Tuple , **__lowerCamelCase : Any ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def lowercase_ ( self : Dict ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 314
|
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( _A = "AAPL" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(_A ).text , '''html.parser''' )
SCREAMING_SNAKE_CASE__ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_SCREAMING_SNAKE_CASE : Any = {
'''E''': 1_2.7_0,
'''T''': 9.0_6,
'''A''': 8.1_7,
'''O''': 7.5_1,
'''I''': 6.9_7,
'''N''': 6.7_5,
'''S''': 6.3_3,
'''H''': 6.0_9,
'''R''': 5.9_9,
'''D''': 4.2_5,
'''L''': 4.0_3,
'''C''': 2.7_8,
'''U''': 2.7_6,
'''M''': 2.4_1,
'''W''': 2.3_6,
'''F''': 2.2_3,
'''G''': 2.0_2,
'''Y''': 1.9_7,
'''P''': 1.9_3,
'''B''': 1.2_9,
'''V''': 0.9_8,
'''K''': 0.7_7,
'''J''': 0.1_5,
'''X''': 0.1_5,
'''Q''': 0.1_0,
'''Z''': 0.0_7,
}
_SCREAMING_SNAKE_CASE : List[Any] = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
_SCREAMING_SNAKE_CASE : List[str] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return x[0]
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_letter_count(_A )
SCREAMING_SNAKE_CASE__ = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_A )
SCREAMING_SNAKE_CASE__ = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_A )
SCREAMING_SNAKE_CASE__ = ''''''.join(freq_to_letter[freq] )
SCREAMING_SNAKE_CASE__ = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_A , reverse=_A )
SCREAMING_SNAKE_CASE__ = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_A )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_frequency_order(_A )
SCREAMING_SNAKE_CASE__ = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
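# The score ranges from 0 to 12: one point for each of English's six most frequent
# letters (E, T, A, O, I, N) appearing among the message's six most frequent letters,
# plus one for each of the six least frequent (V, K, J, X, Q, Z) among its six least frequent.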
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert abs(scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790) < 1e-5
        assert abs(scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052) < 1e-5
        assert abs(scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011) < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the Transformers model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
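
# A quick sketch (illustrative sizes, never called by the script) of the weight tying this
# enables: the returned Linear reuses the embedding matrix as its weight, with no copy made.
def _demo_make_linear_from_emb():
    emb = nn.Embedding(32, 8)  # hypothetical small vocab, not the real NLLB sizes
    lm_head = make_linear_from_emb(emb)
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # same underlying storage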
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Map fairseq parameter names onto the NllbMoe naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
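
# One concrete example of the renaming (hypothetical key, never called by the script):
def _demo_rename_fairseq_keys():
    state = {"decoder.layers.3.moe_layer.experts.0.fc1.weight": torch.zeros(1)}
    renamed = rename_fairseq_keys(state, expert_idx=7)
    assert list(renamed) == ["decoder.layers.3.ffn.experts.expert_7.fc1.weight"]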
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
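
# For reference, the index written above follows the standard Hugging Face sharded-checkpoint
# layout, roughly (hypothetical key and size shown for illustration only):
#
#   {
#     "metadata": {"total_size": 107374182400},
#     "weight_map": {"encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00129.bin", ...}
#   }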
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
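
# For intuition, `find_executable_batch_size` behaves roughly like the sketch below: call the
# wrapped function, halving `batch_size` after every CUDA OOM, until it succeeds or hits zero.
# This is a simplified illustration, not Accelerate's actual implementation.
def _sketch_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "CUDA out of memory" in str(e):
                    batch_size //= 2  # halve and retry
                else:
                    raise

    return wrapper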
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter): it turns text into a sequence of
    characters and converts each character into its Unicode code point.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. split it into a list of characters)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (a single character) to an id (its integer Unicode codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a codepoint (integer) back to a token, using human-readable names for specials."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
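
# End-to-end, the tokenizer boils down to Unicode codepoints; a tiny self-contained sketch:
def _demo_canine_ids():
    ids = [ord(char) for char in "hello"]  # what _tokenize + _convert_token_to_id produce
    assert ids == [104, 101, 108, 108, 111]
    assert "".join(chr(i) for i in ids) == "hello"  # _convert_id_to_token round-trips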
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of PIL images from random uint8 numpy arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
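
# For intuition, the processor under test is a thin router; a simplified sketch of what its
# __call__ does (illustration only, not the real AlignProcessor implementation):
def _sketch_processor_call(tokenizer, image_processor, text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    encoding = {}
    if text is not None:
        encoding.update(tokenizer(text, padding="max_length", max_length=64))
    if images is not None:
        encoding.update(image_processor(images, return_tensors="np"))
    return encoding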
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments from a dot-separated path. Positive counts shave from the start, negative from the end."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new (diffusers) naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new (diffusers) naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Assigns weights to the new checkpoint, splitting fused qkv tensors where requested."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
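
# Shape-only sketch of the qkv split performed above (hypothetical sizes, one head):
def _demo_qkv_split():
    qkv = torch.randn(3 * 64, 64)  # fused projection with channels = 64
    num_heads, channels = 1, 64
    reshaped = qkv.reshape((num_heads, 3 * channels // num_heads) + qkv.shape[1:])
    query, key, value = reshaped.split(channels // num_heads, dim=1)
    assert query.shape == key.shape == value.shape == (1, 64, 64)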
def convert_ldm_checkpoint(checkpoint, config):
    """Converts an (old) LDM UNet checkpoint into the diffusers UNet2DModel format."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
def reverse_words(input_str: str) -> str:
    """
    Reverses word order in the given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
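
# For reference, a local binary pattern packs the centre-vs-neighbour comparisons into one byte;
# a minimal standalone sketch (illustration only, not the library implementation under test):
def _sketch_lbp_value(window):
    """`window` is a 3x3 nested list of ints; returns the LBP byte for the centre pixel."""
    center = window[1][1]
    neighbors = [window[0][0], window[0][1], window[0][2], window[1][2],
                 window[2][2], window[2][1], window[2][0], window[1][0]]
    return sum(1 << i for i, p in enumerate(neighbors) if p >= center)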
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """Read a big-endian uint32 from the byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
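
# E.g. the big-endian bytes 00 00 08 03 decode to 2051, the magic number of an MNIST image file:
def _demo_read32():
    import io

    assert _read32(io.BytesIO(b"\x00\x00\x08\x03")) == 2051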
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
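
# A tiny worked example of the scatter above (labels 0 and 2, three classes):
def _demo_dense_to_one_hot():
    one_hot = _dense_to_one_hot(numpy.array([0, 2]), 3)
    assert one_hot.tolist() == [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]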
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or `float32`
        to rescale into `[0, 1]`. The `seed` argument provides deterministic shuffling.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
def get_data(source_data: list) -> list:
    """Transposes row-wise source data into per-attribute columns of floats."""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
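
# E.g. three (price, mileage) rows become one column per attribute:
def _demo_get_data():
    rows = [["20", "60"], ["23", "90"], ["22", "50"]]
    assert get_data(rows) == [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0]]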
def calculate_each_score(data_lists: list, weights: list) -> list:
    """Min-max normalises every column; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists
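
# Min-max normalisation in both directions on a single column:
def _demo_calculate_each_score():
    assert calculate_each_score([[10.0, 20.0, 30.0]], [1]) == [[0.0, 0.5, 1.0]]  # higher is better
    assert calculate_each_score([[10.0, 20.0, 30.0]], [0]) == [[1.0, 0.5, 0.0]]  # lower is better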
def generate_final_scores(score_lists: list) -> list:
    """Adds up the per-attribute scores for every row."""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list, weights: list) -> list:
    """Scores every row against the weighted attributes and appends the total score to each row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
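
# End-to-end usage: score three cars on price (minimise), mileage (minimise) and year (maximise);
# each row gains a fourth element holding its total score.
def _demo_procentual_proximity():
    source = [["20", "60", "2012"], ["23", "90", "2015"], ["22", "50", "2011"]]
    result = procentual_proximity(source, [0, 0, 1])
    assert all(len(row) == 4 for row in result)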
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
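
# Worked example of the metric above: a hypothesis line "A\tB\tC" against a reference line
# "B\tD" at k=2 keeps the provenance set {A, B}; its overlap with {B, D} has size 1, so that
# example contributes precision@2 = 1 / 2 = 0.5 to the average.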
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
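    # Infer the model type from the checkpoint name when it is not given explicitly.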
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
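    # Choose the scoring and batch-evaluation functions that match the requested eval mode.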
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
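        # Stream questions from the evaluation set and flush predictions to disk one batch at a time.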
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
| 314
| 1
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE : List[Any] = get_logger(__name__)
def UpperCAmelCase_ ( _A , _A , _A , _A , _A=0 ):
'''simple docstring'''
os.makedirs(_A , exist_ok=_A )
with FSDP.state_dict_type(
_A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE__ = model.state_dict()
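        # FULL_STATE_DICT consolidates the checkpoint and saves it from rank 0 only; LOCAL and SHARDED keep per-rank files.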
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_A , _A )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE__ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_A , _A )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE__ = os.path.join(_A , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_A , exist_ok=_A )
logger.info(F'''Saving model to {ckpt_dir}''' )
SCREAMING_SNAKE_CASE__ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=_A , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def UpperCAmelCase_ ( _A , _A , _A , _A , _A=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
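        # Mirror the save logic: load a consolidated, per-rank, or sharded checkpoint depending on the state dict type.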
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_A ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
SCREAMING_SNAKE_CASE__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
logger.info(F'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE__ = torch.load(_A )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE__ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
logger.info(F'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE__ = torch.load(_A )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE__ = (
os.path.join(_A , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE__ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=_A , storage_reader=dist_cp.FileSystemReader(_A ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE__ = state_dict['''model''']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_A )
def UpperCAmelCase_ ( _A , _A , _A , _A , _A , _A=0 ):
'''simple docstring'''
os.makedirs(_A , exist_ok=_A )
with FSDP.state_dict_type(
_A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
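        # Let FSDP assemble the optimizer state dict (gathering shards if necessary) before it is written to disk.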
SCREAMING_SNAKE_CASE__ = FSDP.optim_state_dict(_A , _A )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE__ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_A , _A )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE__ = os.path.join(_A , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_A , exist_ok=_A )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def UpperCAmelCase_ ( _A , _A , _A , _A , _A , _A=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
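        # FULL_STATE_DICT reads a single consolidated file; otherwise the sharded checkpoint directory is loaded.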
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE__ = None
            # The check below should work, but it currently doesn't (most likely a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE__ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
SCREAMING_SNAKE_CASE__ = torch.load(_A )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE__ = (
os.path.join(_A , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE__ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(_A ) , )
SCREAMING_SNAKE_CASE__ = optim_state['''optimizer''']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE__ = FSDP.optim_state_dict_to_load(_A , _A , _A )
optimizer.load_state_dict(_A )
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
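            # Shortest-edge resizing: scale the smaller side to size["shortest_edge"] and keep the aspect ratio.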
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
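            # For a batch, the expected size is the maximum expected height and width over all images.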
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
        # Initialize two image processors
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
| 314
| 1
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
# Check that tokenizer_type ≠ model_type
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer,
            # and that model does not have a tokenizer.json file.
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 314
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 314
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE : Dict = False
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self : int ) -> Tuple:
return 12
@property
def lowercase_ ( self : Any ) -> Dict:
return 12
@property
def lowercase_ ( self : Tuple ) -> Any:
return 32
@property
def lowercase_ ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase_ ( self : List[Any] ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCamelCase )
@property
def lowercase_ ( self : str ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
SCREAMING_SNAKE_CASE__ = TransformeraDModel(**__lowerCamelCase )
return model
def lowercase_ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.dummy_vqvae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_transformer
SCREAMING_SNAKE_CASE__ = VQDiffusionScheduler(self.num_embed )
SCREAMING_SNAKE_CASE__ = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline(
vqvae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , transformer=__lowerCamelCase , scheduler=__lowerCamelCase , learned_classifier_free_sampling_embeddings=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''teddy bear playing in the pool'''
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
[prompt] , generator=__lowerCamelCase , output_type='''np''' , return_dict=__lowerCamelCase , num_inference_steps=2 )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : str ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.dummy_vqvae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_transformer
SCREAMING_SNAKE_CASE__ = VQDiffusionScheduler(self.num_embed )
SCREAMING_SNAKE_CASE__ = LearnedClassifierFreeSamplingEmbeddings(
learnable=__lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline(
vqvae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , transformer=__lowerCamelCase , scheduler=__lowerCamelCase , learned_classifier_free_sampling_embeddings=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''teddy bear playing in the pool'''
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
[prompt] , generator=__lowerCamelCase , output_type='''np''' , return_dict=__lowerCamelCase , num_inference_steps=2 )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
        # requires a GPU generator for Gumbel softmax
        # don't use a GPU generator in tests though
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowerCamelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 314
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
| 314
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc , x_start , x_end , steps = 1_00 , ):
    '''Approximate the arc length of fnc on [x_start, x_end] by summing the lengths of `steps` straight segments.'''
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximate the curve as a sequence of straight segments and sum their lengths
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        length += math.hypot(xa_next - xa , fxa_next - fxa )
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":
    def f( x ):
        '''The test curve: f(x) = sin(10 * x).'''
        return math.sin(10 * x )
    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
    while i <= 100000:
        print(F"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 314
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
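    # Collect every pair of adjacent symbols in the word; BPE ranks these pairs to pick the next merge.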
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def lowercase_ ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
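    # Example (added, illustrative): a single sequence becomes ``<s> A </s>`` and a
    # pair becomes ``<s> A </s> </s> B </s>``, the same layout RoBERTa-style
    # tokenizers produce.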
    def lowercase_ ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def lowercase_ ( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
    def lowercase_ ( self : Any , token : Any ) -> Any:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
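    # Sketch (added, illustrative): a token such as "lower" starts as the symbols
    # ("l", "o", "w", "e", "r</w>"); the loop repeatedly applies the lowest-ranked
    # merge found in bpe.codes, and the surviving pieces are joined with "@@ " so
    # that convert_tokens_to_string below can undo the split.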
    def lowercase_ ( self : Optional[Any] , text : List[Any] ) -> List[Any]:
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
    def lowercase_ ( self : Union[str, Any] , tokens : str ) -> Optional[int]:
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def lowercase_ ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def lowercase_ ( self : int , f : Tuple ) -> Optional[Any]:
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 314
| 1
|
from math import factorial
def UpperCAmelCase_ ( n , k ):
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
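# Hedged example (added): C(5, 2) = 5! / (2! * 3!) = 10.
assert UpperCAmelCase_(5, 2) == 10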
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
| 314
|
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase_ ( n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
    print(F"{UpperCAmelCase_() = }")
| 314
| 1
|
def UpperCAmelCase_ ( collection ):
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
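# Hedged example (added): the sort is in-place and also returns the same list.
assert UpperCAmelCase_([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]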
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(UpperCAmelCase_(unsorted))
| 314
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
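# Hedged usage sketch (added), assuming this class mirrors diffusers' DDIMPipeline;
# the checkpoint id is illustrative, not taken from this file:
#
#   pipe = UpperCAmelCase__.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]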
| 314
| 1
|
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCAmelCase_ ( img ):
    '''simple docstring'''
    height , width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
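# Note (added): with NumPy the same inversion vectorizes to ``255 - img`` for a
# uint8 image, avoiding the per-pixel Python loops above.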
if __name__ == "__main__":
    # read original image
    img = imread('''image_data/lena.jpg''', 1)
    # convert to its negative
    neg = UpperCAmelCase_(img )
    # show result image
    imshow('''negative of original image''', neg )
    waitKey(0)
    destroyAllWindows()
| 314
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
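        # Example (added, not from this file): WTQ-style fine-tuning commonly uses
        # aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"} with
        # no_aggregation_label_index=0.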
| 314
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
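        # Example (added): with the defaults above this is (30 // 2) ** 2 = 225
        # patches, so seq_length = 225 + 2 = 227.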
def lowercase_ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Optional[int] ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : int ) -> str:
SCREAMING_SNAKE_CASE__ = TFDeiTModel(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = TFDeiTForMaskedImageModeling(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = TFDeiTForImageClassification(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = TFDeiTForImageClassification(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
a = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
a = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
a = False
a = False
a = False
a = False
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def lowercase_ ( self : str ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def lowercase_ ( self : List[Any] ) -> Tuple:
pass
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) )
def lowercase_ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=False ) -> str:
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase_ ( self : str ) -> List[Any]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFDeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : List[str] ) -> Any:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 314
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
    def lowercase_ ( self : List[str] ) -> int:
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def lowercase_ ( self : List[str] ) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def lowercase_ ( self : Dict ) -> Any:
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 314
| 1
|
def UpperCAmelCase_ ( limit = 1_00_00_00 ):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
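# Note (added, hedged): this matches Project Euler problem 72; sum(phi(n)) for
# 2 <= n <= limit counts the reduced proper fractions with denominator at most
# limit, and for limit = 1_000_000 the published answer is 303963552391 (not
# asserted here to keep import cheap).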
if __name__ == "__main__":
    print(F"{UpperCAmelCase_() = }")
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
FRAMEWORK = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = CamembertTokenizer
a = CamembertTokenizerFast
a = True
a = True
def lowercase_ ( self : Any ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = '''<pad>'''
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__lowerCamelCase ) , 1004 )
def lowercase_ ( self : int ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowercase_ ( self : Optional[int] ) -> Dict:
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : str ) -> List[Any]:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowercase_ ( self : int ) -> Union[str, Any]:
# fmt: off
SCREAMING_SNAKE_CASE__ = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=__lowerCamelCase , )
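# Hedged parity sketch (the checkpoint name is the real upstream one; the sample
# sentence is illustrative and not taken from the tests above):
# slow = CamembertTokenizer.from_pretrained('''camembert-base''')
# fast = CamembertTokenizerFast.from_pretrained('''camembert-base''')
# assert slow.encode('''J'aime le camembert !''') == fast.encode('''J'aime le camembert !''')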
| 314
|
from ... import PretrainedConfig
_SCREAMING_SNAKE_CASE : Dict = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
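# Hedged sanity sketch (not from the original source; the defaults above mirror
# the sijunhe/nezha-cn-base configuration):
# config = UpperCAmelCase__()
# assert config.hidden_size == 768 and config.max_relative_position == 64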
| 314
| 1
|
import functools
from typing import Any
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError('''the string should be not empty string''' )
if not isinstance(_A , _A ) or not all(
isinstance(_A , _A ) and len(_A ) > 0 for item in words ):
raise ValueError('''the words should be a list of non-empty strings''' )
# Build trie
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = '''WORD_KEEPER'''
for word in words:
SCREAMING_SNAKE_CASE__ = trie
for c in word:
if c not in trie_node:
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = trie_node[c]
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = len(_A )
# Dynamic programming method
@functools.cache
def is_breakable(_A ) -> bool:
if index == len_string:
return True
SCREAMING_SNAKE_CASE__ = trie
for i in range(_A , _A ):
SCREAMING_SNAKE_CASE__ = trie_node.get(string[i] , _A )
if trie_node is None:
return False
if trie_node.get(_A , _A ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
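    # Hedged usage sketch (inputs are illustrative, not from the original source):
    # the classic word-break instances over the trie-backed memoized DP above.
    print(UpperCAmelCase_('''leetcode''' , ['''leet''', '''code'''] ))  # True
    print(UpperCAmelCase_('''applepie''' , ['''apple''', '''pen'''] ))  # False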
| 314
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
_SCREAMING_SNAKE_CASE : List[str] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __getstate__( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = self.__dict__['''_tokenizer'''].get_vocab()
SCREAMING_SNAKE_CASE__ = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) )
def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , **__lowerCamelCase : Tuple , ) -> int:
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
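# Hedged usage sketch (upstream this class ships as RoFormerTokenizerFast; the
# checkpoint name is real, the sample sentence is illustrative):
# tokenizer = UpperCAmelCase__.from_pretrained('''junnyu/roformer_chinese_base''')
# tokenizer.tokenize('''今天天气非常好。''')  # jieba-based pre-tokenization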
| 314
| 1
|
import torch
from diffusers import DiffusionPipeline
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> List[str]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
def __call__( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
        # deliberately a tensor of ones: the subtraction cancels out, so this toy
        # pipeline only exercises the unet/scheduler plumbing, not real denoising
        SCREAMING_SNAKE_CASE__ = scheduler_output - scheduler_output + torch.ones_like(__lowerCamelCase )
return result
| 314
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 314
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_SCREAMING_SNAKE_CASE : Optional[Any] = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_SCREAMING_SNAKE_CASE : int = concatenate_datasets
_SCREAMING_SNAKE_CASE : List[Any] = DownloadConfig
_SCREAMING_SNAKE_CASE : List[str] = DownloadManager
_SCREAMING_SNAKE_CASE : Dict = DownloadMode
_SCREAMING_SNAKE_CASE : Any = DownloadConfig
_SCREAMING_SNAKE_CASE : Optional[int] = DownloadMode
_SCREAMING_SNAKE_CASE : Optional[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
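# Hedged usage sketch of the public API re-exported above (the dataset name is
# illustrative; `load_dataset` is imported a few lines up):
# ds = load_dataset('''imdb''', split='''train''')
# print(ds[0]['''text'''][:80])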
| 314
|
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
    SCREAMING_SNAKE_CASE__ = [float(n ) for n in range(limit + 1 )]  # phi[n] starts as n before applying Euler's product
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"{solution() = }")
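    # Hedged hand check (illustrative, not from the original source):
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) — i.e. UpperCAmelCase_(8)
    # above — returns their sum, 21: the number of reduced proper fractions
    # with denominator <= 8.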
| 314
| 1
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _A , )
if isinstance(_A , torch.Tensor ):
return image
elif isinstance(_A , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image[0].size
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE__ = np.concatenate(_A , axis=0 )
SCREAMING_SNAKE_CASE__ = np.array(_A ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE__ = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE__ = torch.from_numpy(_A )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = torch.cat(_A , dim=0 )
return image
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if isinstance(_A , torch.Tensor ):
return mask
elif isinstance(_A , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mask[0].size
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE__ = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE__ = np.concatenate(_A , axis=0 )
SCREAMING_SNAKE_CASE__ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = torch.from_numpy(_A )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = torch.cat(_A , dim=0 )
return mask
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = 42
a = 42
def __init__( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : Dict , __lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] , __lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] , __lowerCamelCase : int = 250 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 10 , __lowerCamelCase : int = 10 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
SCREAMING_SNAKE_CASE__ = image
SCREAMING_SNAKE_CASE__ = _preprocess_image(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE__ = _preprocess_mask(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE__ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = original_image.shape
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.device )
SCREAMING_SNAKE_CASE__ = eta
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE__ = generator[0] if isinstance(__lowerCamelCase , __lowerCamelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE__ = self.scheduler.undo_step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = t
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
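# Hedged usage sketch (mirrors the upstream RePaint example; the checkpoint name
# is the real demo model, the image/mask inputs are illustrative):
# pipe = UpperCAmelCase__.from_pretrained('''google/ddpm-ema-celebahq-256''')
# out = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#            jump_length=10, jump_n_sample=10)
# out.images[0].save('''inpainted.png''')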
| 314
|
import numpy as np
from PIL import Image
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
SCREAMING_SNAKE_CASE__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
SCREAMING_SNAKE_CASE__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
SCREAMING_SNAKE_CASE__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
return updated_arr
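# Hedged hand check (not from the original source): on the 4x4 array
# [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] with size=2 and
# stride=2, maxpooling gives [[6, 8], [14, 16]] and avgpooling gives
# [[3, 5], [11, 13]] (the averages 3.5, 5.5, 11.5, 13.5 are truncated by int()).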
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str , __lowerCamelCase : str ) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
SCREAMING_SNAKE_CASE__ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''sgugger/tiny-distilbert-classification'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(__lowerCamelCase , '''env.csv''' ) , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''env.csv''' ) ).exists() )
def lowercase_ ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__lowerCamelCase : Optional[Any] ):
self.assertTrue(hasattr(__lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , '''log.txt''' ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''log.txt''' ) ).exists() )
| 314
|
from __future__ import annotations
def UpperCAmelCase_ ( _A , _A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = word_bank or []
# create a table
SCREAMING_SNAKE_CASE__ = len(_A ) + 1
SCREAMING_SNAKE_CASE__ = []
for _ in range(_A ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_A ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_A )] == word:
SCREAMING_SNAKE_CASE__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_A )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_A )]:
combination.reverse()
return table[len(_A )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
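    # Hedged expected result for the first call, worked by hand: exactly two
    # decompositions, ['jwa', 'j', 'a', 'lapa'] and ['j', 'w', 'a', 'j', 'a', 'lapa'].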
| 314
| 1
|
_SCREAMING_SNAKE_CASE : Optional[Any] = '''Input must be a string of 8 numbers plus letter'''
_SCREAMING_SNAKE_CASE : List[Any] = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if not isinstance(_A , _A ):
SCREAMING_SNAKE_CASE__ = F'''Expected string as input, found {type(_A ).__name__}'''
raise TypeError(_A )
SCREAMING_SNAKE_CASE__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(_A ) != 9:
raise ValueError(_A )
try:
SCREAMING_SNAKE_CASE__ = int(spanish_id_clean[0:8] )
SCREAMING_SNAKE_CASE__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_A ) from ex
if letter.isdigit():
raise ValueError(_A )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
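    # Hedged hand check (illustrative): 12345678 % 23 == 14 and
    # LOOKUP_LETTERS[14] == '''Z''', so the letter must be Z for this number.
    print(UpperCAmelCase_('''12345678Z''' ))  # True
    print(UpperCAmelCase_('''12345678T''' ))  # False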
| 314
|
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase_ ( _A = "AAPL" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(_A ).text , '''html.parser''' )
SCREAMING_SNAKE_CASE__ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
| 314
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=7 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=18 , __lowerCamelCase : Dict=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=True , __lowerCamelCase : str=False , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''height''': 18, '''width''': 20}
SCREAMING_SNAKE_CASE__ = do_thumbnail
SCREAMING_SNAKE_CASE__ = do_align_axis
SCREAMING_SNAKE_CASE__ = do_pad
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
def lowercase_ ( self : Dict ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = DonutImageProcessor if is_vision_available() else None
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = DonutImageProcessingTester(self )
@property
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def lowercase_ ( self : int ) -> List[str]:
pass
@is_flaky()
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self : Union[str, Any] ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self : Tuple ) -> List[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 314
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ( ):
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE__ = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE__ = nn.Linear(4 , 5 )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mock_training_loop_function('''hello''' )
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__lowerCamelCase : Optional[Any] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
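# Hedged usage sketch of the decorator exercised above (`find_executable_batch_size`
# is the real accelerate API imported at the top; the training body is illustrative):
#
# @find_executable_batch_size(starting_batch_size=128)
# def training_function(batch_size):
#     ...  # on CUDA OOM, the wrapper halves batch_size and retries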
| 314
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return max(metric_fn(_A , _A ) for gt in ground_truths )
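# Hedged illustration (not from the original script): assuming the usual
# SQuAD-style normalization inside utils_rag, the prediction '''paris''' scored
# against ground truths ['''Paris''', '''Rome'''] returns 1, because the max is
# taken over all references.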
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
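# A minimal worked example of the score above, with hypothetical doc ids and
# k = 2: the top-2 hypotheses share one id with the gold provenance, so this
# line contributes 1 / 2 = 0.5 to the running total.
assert len({"doc1", "doc2"} & {"doc2", "doc9"}) / 2 == 0.5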
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
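# A typical invocation of this script (the model id is the released RAG
# checkpoint; file paths are hypothetical placeholders):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5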
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
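# A minimal sketch of the processor pattern these tests cover, assuming network
# access to the released ALIGN checkpoint; the image path is a hypothetical
# placeholder.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AlignProcessor
    processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
    image = Image.open("path/to/image.png")
    inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids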
| 314
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Any=10 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : str=32 * 4 , __lowerCamelCase : Any=32 * 6 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Optional[int]=32 , ) -> List[str]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ = num_queries
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_size
SCREAMING_SNAKE_CASE__ = max_size
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = mask_feature_size
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCamelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCamelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self : Tuple ) -> Dict:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase_ ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCamelCase ) , config.decoder_config.decoder_layers )
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=False ) -> Any:
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = MaskFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(pixel_values=__lowerCamelCase , pixel_mask=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , output_hidden_states=__lowerCamelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
def comm_check_on_output(__lowerCamelCase : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(pixel_values=__lowerCamelCase , pixel_mask=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
comm_check_on_output(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(
pixel_values=__lowerCamelCase , pixel_mask=__lowerCamelCase , mask_labels=__lowerCamelCase , class_labels=__lowerCamelCase )
comm_check_on_output(__lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
a = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCamelCase , **__lowerCamelCase , output_hidden_states=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self : int ) -> str:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self : Optional[Any] ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
def lowercase_ ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE__ = MaskFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=__lowerCamelCase ).long(),
}
SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCamelCase , **__lowerCamelCase , output_hidden_states=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase , output_attentions=__lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self : List[Any] ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , mask_labels=__lowerCamelCase , class_labels=__lowerCamelCase ).loss
loss.backward()
def lowercase_ ( self : Tuple ) -> Any:
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , mask_labels=__lowerCamelCase , class_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_SCREAMING_SNAKE_CASE : Dict = 1e-4
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : List[str] ) -> Any:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
def lowercase_ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCamelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__ = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
def lowercase_ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCamelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__ = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCamelCase , atol=__lowerCamelCase ) )
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCamelCase )
.eval()
)
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = inputs['''pixel_values'''].to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [el.to(__lowerCamelCase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ = [el.to(__lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
self.assertTrue(outputs.loss is not None )
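# A minimal inference sketch for the checkpoint the slow tests above use,
# assuming network access; the image path is a hypothetical placeholder.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    image = Image.open("path/to/image.jpg")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) tensor of per-pixel class ids per input image
    semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(semantic_map.shape)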
| 314
|
def UpperCAmelCase_ ( _A ):
    '''
    >>> UpperCAmelCase_('I love Python')
    'Python love I'
    '''
    return " ".join(_A.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 1
|
from math import sqrt
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (
number >= 0
), "'number' must been an int and positive"
SCREAMING_SNAKE_CASE__ = True
# 0 and 1 are none primes.
if number <= 1:
SCREAMING_SNAKE_CASE__ = False
for divisor in range(2 , int(round(sqrt(_A ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
SCREAMING_SNAKE_CASE__ = False
break
# precondition
assert isinstance(_A , _A ), "'status' must been from type bool"
return status
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
SCREAMING_SNAKE_CASE__ = list(range(2 , n + 1 ) )
    SCREAMING_SNAKE_CASE__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(_A ) ):
for j in range(i + 1 , len(_A ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
SCREAMING_SNAKE_CASE__ = 0
# filters actual prime numbers.
SCREAMING_SNAKE_CASE__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_A , _A ), "'ans' must been from type list"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n > 2), "'N' must been an int and > 2"
SCREAMING_SNAKE_CASE__ = []
    # iterate over all numbers from 2 up to N+1
    # if a number is prime then append it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_A ):
ans.append(_A )
# precondition
assert isinstance(_A , _A ), "'ans' must been from type list"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
    assert isinstance(_A , _A ) and number >= 0, "'number' must be an int and >= 0"
    SCREAMING_SNAKE_CASE__ = [] # this list will be returned by the function.
# potential prime number factors.
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = number
if number == 0 or number == 1:
ans.append(_A )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(_A ):
while quotient != 1:
if is_prime(_A ) and (quotient % factor == 0):
ans.append(_A )
quotient /= factor
else:
factor += 1
else:
ans.append(_A )
# precondition
assert isinstance(_A , _A ), "'ans' must been from type list"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE__ = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE__ = prime_factorization(_A )
SCREAMING_SNAKE_CASE__ = max(_A )
# precondition
assert isinstance(_A , _A ), "'ans' must been from type int"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE__ = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE__ = prime_factorization(_A )
SCREAMING_SNAKE_CASE__ = min(_A )
# precondition
assert isinstance(_A , _A ), "'ans' must been from type int"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _A ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _A ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert (
isinstance(_A , _A ) and (number > 2) and is_even(_A )
), "'number' must been an int, even and > 2"
SCREAMING_SNAKE_CASE__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
SCREAMING_SNAKE_CASE__ = get_prime_numbers(_A )
SCREAMING_SNAKE_CASE__ = len(_A )
# run variable for while-loops.
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = None
    # exit variable, to break out of the loops
SCREAMING_SNAKE_CASE__ = True
while i < len_pn and loop:
SCREAMING_SNAKE_CASE__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
SCREAMING_SNAKE_CASE__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_A , _A )
and (len(_A ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
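# A minimal self-contained sketch of the same Goldbach search, carrying its own
# trial-division primality test so it can run independently:
def _is_prime_demo(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
def _goldbach_demo(n):
    # first pair of primes summing to an even n > 2
    for p in range(2, n):
        if _is_prime_demo(p) and _is_prime_demo(n - p):
            return [p, n - p]
    raise ValueError("no decomposition found")
assert _goldbach_demo(28) == [5, 23]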
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert (
isinstance(_A , _A )
and isinstance(_A , _A )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE__ = 0
while numbera != 0:
SCREAMING_SNAKE_CASE__ = numbera % numbera
SCREAMING_SNAKE_CASE__ = numbera
SCREAMING_SNAKE_CASE__ = rest
# precondition
assert isinstance(_A , _A ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
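# Worked trace of the Euclidean algorithm this helper implements: gcd(48, 18)
# computes 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0, so the result is 6.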
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert (
isinstance(_A , _A )
and isinstance(_A , _A )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
SCREAMING_SNAKE_CASE__ = prime_factorization(_A )
SCREAMING_SNAKE_CASE__ = prime_factorization(_A )
elif numbera == 1 or numbera == 1:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = max(_A , _A )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = [] # numbers contained in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
SCREAMING_SNAKE_CASE__ = prime_fac_a.count(_A )
SCREAMING_SNAKE_CASE__ = prime_fac_a.count(_A )
for _ in range(max(_A , _A ) ):
ans *= n
else:
SCREAMING_SNAKE_CASE__ = prime_fac_a.count(_A )
for _ in range(_A ):
ans *= n
done.append(_A )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
SCREAMING_SNAKE_CASE__ = prime_fac_a.count(_A )
for _ in range(_A ):
ans *= n
done.append(_A )
# precondition
assert isinstance(_A , _A ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
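# Worked example: 4 = 2*2 and 6 = 2*3 merge into the factorization 2*2*3,
# so the least common multiple of 4 and 6 is 12.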
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n >= 0), "'number' must been a positive int"
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
while not is_prime(_A ):
ans += 1
# precondition
assert isinstance(_A , _A ) and is_prime(
_A ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert (
is_prime(_A ) and is_prime(_A ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
SCREAMING_SNAKE_CASE__ = p_number_a + 1 # jump to the next number
    SCREAMING_SNAKE_CASE__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_A ):
number += 1
while number < p_number_a:
ans.append(_A )
number += 1
# fetch the next prime number.
while not is_prime(_A ):
number += 1
# precondition
assert (
isinstance(_A , _A )
and ans[0] != p_number_a
and ans[len(_A ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n >= 1), "'n' must been int and >= 1"
SCREAMING_SNAKE_CASE__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_A )
# precondition
    assert ans[0] == 1 and ans[len(_A ) - 1] == n, "Error in function getDivisors(...)"
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (
number > 1
), "'number' must been an int and >= 1"
SCREAMING_SNAKE_CASE__ = get_divisors(_A )
# precondition
assert (
isinstance(_A , _A )
and (divisors[0] == 1)
and (divisors[len(_A ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
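# Example: 28 is perfect, since its proper divisors 1 + 2 + 4 + 7 + 14 sum to 28.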
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert (
isinstance(_A , _A )
and isinstance(_A , _A )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
SCREAMING_SNAKE_CASE__ = gcd(abs(_A ) , abs(_A ) )
# precondition
assert (
isinstance(_A , _A )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
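# Example: (3, 9) reduces by gcd(3, 9) = 3 to the simplified fraction (1, 3).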
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n >= 0), "'n' must been a int and >= 0"
SCREAMING_SNAKE_CASE__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
assert isinstance(_A , _A ) and (n >= 0), "'n' must been an int and >= 0"
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
    SCREAMING_SNAKE_CASE__ = 1 # this will be returned
for _ in range(n - 1 ):
SCREAMING_SNAKE_CASE__ = ans
ans += fiba
SCREAMING_SNAKE_CASE__ = tmp
return ans
| 314
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
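# A minimal worked example of the flat-index trick behind the helper above (the
# original assigns labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1):
# for dense labels [0, 2, 1] with 3 classes, flat positions 0*3+0, 1*3+2 and
# 2*3+1 are set to 1, matching an identity matrix indexed by the labels.
_demo_dense = numpy.array([0, 2, 1])
_demo_one_hot = numpy.zeros((3, 3))
_demo_one_hot.flat[numpy.arange(3) * 3 + _demo_dense] = 1
assert (_demo_one_hot == numpy.eye(3)[_demo_dense]).all()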
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A , _A ) # noqa: S310
with gfile.GFile(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.size()
print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
return filepath
@deprecated(
_A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
return _Datasets(train=_A , validation=_A , test=_A )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
if not 0 <= validation_size <= len(_A ):
SCREAMING_SNAKE_CASE__ = (
'''Validation size should be between 0 and '''
F'''{len(_A )}. Received: {validation_size}.'''
)
raise ValueError(_A )
SCREAMING_SNAKE_CASE__ = train_images[:validation_size]
SCREAMING_SNAKE_CASE__ = train_labels[:validation_size]
SCREAMING_SNAKE_CASE__ = train_images[validation_size:]
SCREAMING_SNAKE_CASE__ = train_labels[validation_size:]
SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
return _Datasets(train=_A , validation=_A , test=_A )
| 314
| 1
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : NestedDataStructureLike[PathLike] , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : str , ) -> int:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = field
SCREAMING_SNAKE_CASE__ = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ = Json(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , field=__lowerCamelCase , **__lowerCamelCase , )
def lowercase_ ( self : List[str] ) -> List[Any]:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dataset , __lowerCamelCase : Union[PathLike, BinaryIO] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : Dict , ) -> Optional[Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ = dataset
SCREAMING_SNAKE_CASE__ = path_or_buf
SCREAMING_SNAKE_CASE__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ = num_proc
SCREAMING_SNAKE_CASE__ = '''utf-8'''
SCREAMING_SNAKE_CASE__ = to_json_kwargs
def lowercase_ ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''path_or_buf''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''orient''' , '''records''' )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''compression''' , __lowerCamelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowerCamelCase ) as buffer:
SCREAMING_SNAKE_CASE__ = self._write(file_obj=__lowerCamelCase , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
SCREAMING_SNAKE_CASE__ = self._write(
file_obj=self.path_or_buf , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **self.to_json_kwargs )
return written
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = args
SCREAMING_SNAKE_CASE__ = query_table(
table=self.dataset.data , key=slice(__lowerCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ = batch.to_pandas().to_json(
path_or_buf=__lowerCamelCase , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **__lowerCamelCase )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowercase_ ( self : Any , __lowerCamelCase : BinaryIO , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] , ) -> int:
SCREAMING_SNAKE_CASE__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
SCREAMING_SNAKE_CASE__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCamelCase , __lowerCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(__lowerCamelCase )
return written
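if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module). The class names
    # above are obfuscated, so this goes through the public `datasets` entry points
    # Dataset.from_dict, Dataset.to_json and Dataset.from_json, which wrap the
    # reader/writer defined here; lines=True is forwarded to pandas' to_json.
    demo = Dataset.from_dict({"question": ["q1", "q2"], "answer": ["a1", "a2"]})
    demo.to_json("sample.jsonl", lines=True)  # one JSON record per line
    roundtrip = Dataset.from_json("sample.jsonl")
    assert roundtrip.num_rows == demo.num_rows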
| 314
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
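# Added illustration: e.g. "facebook/rag-token-nq" contains "token" -> "rag_token",
# "facebook/rag-sequence-nq" -> "rag_sequence", "facebook/bart-large" -> "bart".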
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return max(metric_fn(_A , _A ) for gt in ground_truths )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
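# Worked example (added): with k = 2, a hypothesis line "A\tB\tC" and a reference
# line "B\tD", hypo_provenance = {"A", "B"}, the overlap is {"B"}, so this pair
# contributes 1/2 to em before the final conversion to a percentage.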
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
| 314
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = SwinConfig(image_size=1_92 )
if "base" in model_name:
SCREAMING_SNAKE_CASE__ = 6
SCREAMING_SNAKE_CASE__ = 1_28
SCREAMING_SNAKE_CASE__ = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ = (4, 8, 16, 32)
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 1_92
SCREAMING_SNAKE_CASE__ = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
SCREAMING_SNAKE_CASE__ = window_size
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = num_heads
return config
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "encoder.mask_token" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ = '''layernorm.weight'''
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ = '''layernorm.bias'''
if "decoder" in name:
pass
else:
SCREAMING_SNAKE_CASE__ = '''swin.''' + name
return name
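# Added illustration: a checkpoint key such as "encoder.layers.0.blocks.0.norm1.weight"
# is rewritten step by step to "swin.encoder.layers.0.blocks.0.layernorm_before.weight".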
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(_A )
if "attn_mask" in key:
pass
elif "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(key_split[2] )
SCREAMING_SNAKE_CASE__ = int(key_split[4] )
SCREAMING_SNAKE_CASE__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
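            # The checkpoint stores query/key/value as one fused "qkv" tensor: the first
            # `dim` rows (or entries, for biases) are Q, the next `dim` are K, the last `dim` are V.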
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[
:dim
]
SCREAMING_SNAKE_CASE__ = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ = val[
-dim:
]
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def UpperCAmelCase_ ( _A , _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location='''cpu''' )['''model''']
SCREAMING_SNAKE_CASE__ = get_swin_config(_A )
SCREAMING_SNAKE_CASE__ = SwinForMaskedImageModeling(_A )
model.eval()
SCREAMING_SNAKE_CASE__ = convert_state_dict(_A , _A )
model.load_state_dict(_A )
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} )
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
SCREAMING_SNAKE_CASE__ = image_processor(images=_A , return_tensors='''pt''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**_A ).logits
print(outputs.shape )  # the forward pass returns logits (a tensor), so report its shape
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
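# Added illustration: with size {"shortest_edge": 18} and a 40x30 (width x height)
# PIL image, w > h holds, so the expected output is height 18 and
# width int(18 * 40 / 30) = 24; the batched branch then pads to the per-batch maxima.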
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image processors
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test that calling the "pad" method and calling the image processor directly return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
| 314
| 1
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "roberta-prelayernorm"
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=5_0265 , __lowerCamelCase : str=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = classifier_dropout
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
@property
def lowercase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 314
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
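# Note (added): at runtime sys.modules[__name__] is replaced by this _LazyModule, so the
# classes listed in _import_structure are only imported on first attribute access.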
| 314
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
| 314
| 1
|
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = limit + 1
SCREAMING_SNAKE_CASE__ = [0] * limit
for first_term in range(1 , _A ):
for n in range(_A , _A , _A ):
SCREAMING_SNAKE_CASE__ = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d, also 4d < a
SCREAMING_SNAKE_CASE__ = sum(1 for x in frequency[1:limit] if x == 10 )
return count
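# Added note: this is Project Euler 135 - count the values of n below the limit for
# which x**2 - y**2 - z**2 = n, with x, y, z successive terms of a decreasing
# arithmetic progression, has exactly ten solutions; frequency[n] tallies the
# solutions per n and the final sum counts the n that hit exactly ten.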
if __name__ == "__main__":
print(F"{solution() = }")
| 314
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
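# Added illustration: for the word "low" the symbol pairs are {("l", "o"), ("o", "w")};
# the BPE loop below repeatedly merges the highest-ranked pair from this set.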
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
def lowercase_ ( self : int , __lowerCamelCase : Tuple ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(__lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
SCREAMING_SNAKE_CASE__ = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE__ = lineTmp.strip()
SCREAMING_SNAKE_CASE__ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
SCREAMING_SNAKE_CASE__ = line[:idx]
SCREAMING_SNAKE_CASE__ = len(self.encoder )
| 314
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
_SCREAMING_SNAKE_CASE : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_VOCAB_FILES_MAP
a = ["input_ids", "attention_mask"]
a = MBartTokenizer
a = []
a = []
def __init__( self : int , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : str=None , **__lowerCamelCase : int , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else '''en_XX'''
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowercase_ ( self : Dict ) -> str:
return self._src_lang
@src_lang.setter
def lowercase_ ( self : int , __lowerCamelCase : str ) -> None:
SCREAMING_SNAKE_CASE__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] , **__lowerCamelCase : Any ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tgt_lang_id
return inputs
def lowercase_ ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : str = "en_XX" , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "ro_RO" , **__lowerCamelCase : Optional[Any] , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> None:
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
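        # mBART source format (added note): sequences are suffixed with "</s> <src_lang_code>"
        # and get no prefix tokens, hence the empty prefix list and the [eos, lang_code] suffix.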
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self : Any , __lowerCamelCase : str ) -> None:
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 314
|
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution( _A = _SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , _A[i : i + 13] ) )
        for i in range(len(_A ) - 12 ) )
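# Worked example (illustrative, not in the original): with a window of 3 over
# "512384", the reduce lambda folds ("5", "1") -> "5", then ("5", "2") -> "10",
# so the four windows yield 10, 6, 48 and 96, and max(...) returns 96.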
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 1
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def UpperCAmelCase_ ( _A , _A = 16 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_A ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_A , max_length=_A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ = datasets.map(
_A , batched=_A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ = 8
else:
SCREAMING_SNAKE_CASE__ = None
return tokenizer.pad(
_A , padding='''longest''' , max_length=_A , pad_to_multiple_of=_A , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=_A , collate_fn=_A , batch_size=_A , drop_last=_A )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_A , collate_fn=_A , batch_size=_A , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
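# Illustrative sketch (not part of the original script): the pad_to_multiple_of
# logic above rounds sequence lengths up so tensor dims hit the multiples of
# 8/16 that mixed-precision kernels prefer. This mirrors only the rounding step.
def _round_up_demo(length, multiple):
    # e.g. _round_up_demo(37, 8) == 40, while 40 stays 40
    return -(-length // multiple) * multiple
assert _round_up_demo(37, 8) == 40 and _round_up_demo(40, 8) == 40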
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config['''lr''']
SCREAMING_SNAKE_CASE__ = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ = MAX_GPU_BATCH_SIZE
set_seed(_A )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = get_dataloaders(_A , _A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=_A )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=_A , num_warmup_steps=1_00 , num_training_steps=(len(_A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.prepare(
_A , _A , _A , _A , _A )
# Now we train the model
for epoch in range(_A ):
model.train()
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ = model(**_A )
SCREAMING_SNAKE_CASE__ = outputs.loss
SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps
accelerator.backward(_A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**_A )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_A , references=_A , )
SCREAMING_SNAKE_CASE__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _A )
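# Illustrative sketch (not part of the original script): why the loop above
# divides each loss by gradient_accumulation_steps. Accumulating gradients of
# the scaled micro-batch losses reproduces one step on the averaged loss.
def _gradient_accumulation_demo():
    w = torch.ones((), requires_grad=True)
    micro_losses = [w * 2.0, w * 4.0]  # two micro-batches
    for loss in micro_losses:
        (loss / len(micro_losses)).backward()  # same scaling as `loss / gradient_accumulation_steps`
    # the accumulated gradient equals the gradient of the mean loss: (2.0 + 4.0) / 2 = 3.0
    assert torch.allclose(w.grad, torch.tensor(3.0))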
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_A , default=_A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_A , _A )
if __name__ == "__main__":
main()
| 314
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
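# Illustrative usage sketch (assumption: an unconditional checkpoint such as
# "google/ddpm-cifar10-32"; not part of the original file):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]  # eta=0.0 -> deterministic DDIM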
| 314
| 1
|
from __future__ import annotations
def UpperCAmelCase_ ( number_of_bytes , partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
    return allocation_list
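# Worked examples (illustrative, not in the original):
#   UpperCAmelCase_(16, 4) -> ['1-4', '5-8', '9-12', '13-16']
#   UpperCAmelCase_(10, 3) -> ['1-3', '4-6', '7-10']  # the last partition absorbs the remainder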
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
def __init__( self : int , __lowerCamelCase : Optional[Any]=3_0522 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : int=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Union[str, Any]=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=1024 , __lowerCamelCase : Union[str, Any]=[3, 256, 256, 2, 256, 256, 10] , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[Any]=10.0 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : str=1.0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=1.0 , __lowerCamelCase : Dict=1.0 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , __lowerCamelCase : List[str]="ratio" , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[Any]=64 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str , ) -> str:
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_sizes
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE__ = positive_label_weight
SCREAMING_SNAKE_CASE__ = num_aggregation_labels
SCREAMING_SNAKE_CASE__ = aggregation_loss_weight
SCREAMING_SNAKE_CASE__ = use_answer_as_supervision
SCREAMING_SNAKE_CASE__ = answer_loss_importance
SCREAMING_SNAKE_CASE__ = use_normalized_answer_loss
SCREAMING_SNAKE_CASE__ = huber_loss_delta
SCREAMING_SNAKE_CASE__ = temperature
SCREAMING_SNAKE_CASE__ = aggregation_temperature
SCREAMING_SNAKE_CASE__ = use_gumbel_for_cells
SCREAMING_SNAKE_CASE__ = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE__ = average_approximation_function
SCREAMING_SNAKE_CASE__ = cell_selection_preference
SCREAMING_SNAKE_CASE__ = answer_loss_cutoff
SCREAMING_SNAKE_CASE__ = max_num_rows
SCREAMING_SNAKE_CASE__ = max_num_columns
SCREAMING_SNAKE_CASE__ = average_logits_per_cell
SCREAMING_SNAKE_CASE__ = select_one_column
SCREAMING_SNAKE_CASE__ = allow_empty_column_selection
SCREAMING_SNAKE_CASE__ = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE__ = reset_position_index_per_cell
SCREAMING_SNAKE_CASE__ = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE__ = aggregation_labels
SCREAMING_SNAKE_CASE__ = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            SCREAMING_SNAKE_CASE__ = {int(k ): v for k, v in aggregation_labels.items()}
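# Illustrative note (not part of the original file): when a config is loaded from
# JSON, aggregation_labels arrives with string keys, e.g. {"0": "NONE", "1": "SUM"},
# and the cast above turns it into {0: "NONE", 1: "SUM"} so the indices can be used
# directly as aggregation ids. The label names here are examples, not defaults.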
| 314
| 1
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
    def add_from_file( self : int , __lowerCamelCase : Tuple ) -> None:
        if isinstance(__lowerCamelCase , str ):
            try:
                with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {__lowerCamelCase}, please rebuild the dataset''' )
            return
        lines = __lowerCamelCase.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
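# Illustrative sketch (not part of the original file): the greedy lowest-rank BPE
# merge loop above in miniature. This simplified version merges one occurrence per
# step and strips '</w>' with replace; the toy merge ranks are made up.
def _toy_bpe(token, ranks):
    word = list(token[:-1]) + [token[-1] + "</w>"]
    while len(word) > 1:
        scored = [(ranks.get(pair, float("inf")), i, pair) for i, pair in enumerate(zip(word, word[1:]))]
        rank, i, pair = min(scored)
        if rank == float("inf"):
            break  # no known merge left
        word = word[:i] + [pair[0] + pair[1]] + word[i + 2 :]
    return "@@ ".join(word).replace("</w>", "")

assert _toy_bpe("low", {("l", "o"): 0}) == "lo@@ w"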
| 314
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
# Check that tokenizer_type ≠ model_type
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a BertTokenizerFast here because there is no slow-to-fast converter
            # for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
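# Illustrative sketch (not part of the test file): the registration pattern the
# tests above exercise, in plain form (names come from this file's imports):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
#   # ...and clean up afterwards, exactly as the finally blocks above do.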
| 314
| 1
|
def UpperCAmelCase_ ( k , n ):
    '''simple docstring'''
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
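# Worked example (illustrative, not in the original): for n = 3 the factorials
# stack is [1, 2]. With k = 4: divmod(4, 2) picks elements[2] = 2 and leaves k = 0,
# divmod(0, 1) picks elements[0] = 0, and the leftover 1 is appended -> [2, 0, 1].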
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
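# Illustrative usage sketch (not part of the test file): what the processor under
# test does end to end, mirroring the tests above.
#
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="lower newer", images=image_inputs)
#   # -> keys: input_ids, token_type_ids, attention_mask, pixel_values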
| 314
| 1
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : int=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=99 , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : str=16 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : int=0.02 , __lowerCamelCase : int=3 , __lowerCamelCase : str=4 , __lowerCamelCase : Dict=None , ) -> str:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowercase_ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = NystromformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = NystromformerForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = NystromformerForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = NystromformerForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = NystromformerForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = NystromformerForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
a = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a = False
a = False
def lowercase_ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowercase_ ( self : Optional[int] ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase_ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def lowercase_ ( self : Union[str, Any] ) -> int:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = NystromformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__ = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = '''the [MASK] of Belgium is Brussels'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
SCREAMING_SNAKE_CASE__ = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , return_tensors='''pt''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE__ = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__lowerCamelCase ) , '''capital''' )
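# A minimal usage sketch (added; not part of the original tests): the same checkpoint
# can be driven through the fill-mask pipeline, and the expected top token mirrors
# the assertion above.
if __name__ == "__main__":
    from transformers import pipeline

    fill_mask = pipeline('''fill-mask''', model='''uw-madison/nystromformer-512''')
    print(fill_mask('''the [MASK] of Belgium is Brussels''')[0]['''token_str'''])  # expected: capital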
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
def lowercase_ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE__ = ScoreSdeVeScheduler()
SCREAMING_SNAKE_CASE__ = ScoreSdeVePipeline(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
sde_ve.to(__lowerCamelCase )
sde_ve.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__lowerCamelCase , return_dict=__lowerCamelCase )[
0
]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ = '''google/ncsnpp-church-256'''
        SCREAMING_SNAKE_CASE__ = UNet2DModel.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = ScoreSdeVeScheduler.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = ScoreSdeVePipeline(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
sde_ve.to(__lowerCamelCase )
sde_ve.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
    def __getstate__( self : Optional[Any] ) -> Any:
        state = self.__dict__.copy()
        state['''_tokenizer'''].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
        self.__dict__ = __lowerCamelCase
        vocab = self.__dict__['''_tokenizer'''].get_vocab()
        self.__dict__['''_tokenizer'''].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self : int , token_ids_a : Any , token_ids_b : List[Any] = None ) -> List[Any]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self : str , save_directory : Any , legacy_format : Any = None , filename_prefix : Optional[str] = None , push_to_hub : bool = False , **kwargs : Tuple , ) -> Any:
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
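# Usage sketch (added; assumption: this class corresponds to RoFormerTokenizerFast in
# transformers, with a checkpoint name taken from the map above; the jieba
# pre-tokenizer requires the rjieba package).
if __name__ == "__main__":
    from transformers import RoFormerTokenizerFast

    tokenizer = RoFormerTokenizerFast.from_pretrained('''junnyu/roformer_chinese_base''')
    print(tokenizer.tokenize('''今天天气非常好。'''))  # word-level pieces from the jieba pre-tokenizer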
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = 0
a = False
a = 3.0
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[Any] ) -> Union[str, Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=__lowerCamelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> Tuple:
# If no defaults are changed, `to_kwargs` returns an empty dict.
SCREAMING_SNAKE_CASE__ = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
SCREAMING_SNAKE_CASE__ = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
SCREAMING_SNAKE_CASE__ = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , __lowerCamelCase )
@require_multi_gpu
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f'''{len(dest_layers )} != {len(layers_to_copy )}'''
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
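# Quick sanity checks of the two maps above (added): distilling a 12-layer teacher
# into a 3-layer student copies teacher layers 0, 6 and 11 and supervises 3, 7 and 11.
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]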
def pick_layers_to_copy(n_student, n_teacher):
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                f''' {n_student}''' )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student, n_teacher):
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def UpperCAmelCase_ ( _A , _A = "student" , _A = None , _A = None , _A=False , _A=None , _A=None , **_A , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(_A , _A ):
AutoTokenizer.from_pretrained(_A ).save_pretrained(_A ) # purely for convenience
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(_A ).eval()
else:
assert isinstance(_A , _A ), F'''teacher must be a model or string got type {type(_A )}'''
SCREAMING_SNAKE_CASE__ = teacher.config.to_diff_dict()
try:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
SCREAMING_SNAKE_CASE__ = teacher_e
if d is None:
SCREAMING_SNAKE_CASE__ = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
SCREAMING_SNAKE_CASE__ = teacher_e
if d is None:
SCREAMING_SNAKE_CASE__ = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_A )
# Copy weights
SCREAMING_SNAKE_CASE__ = teacher.config_class(**_A )
SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_config(_A )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
SCREAMING_SNAKE_CASE__ = student.load_state_dict(teacher.state_dict() , strict=_A )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = list(range(_A ) ), list(range(_A ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(_A )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
SCREAMING_SNAKE_CASE__ = pick_layers_to_copy(_A , _A )
if d_layers_to_copy is None:
SCREAMING_SNAKE_CASE__ = pick_layers_to_copy(_A , _A )
try:
if hasattr(
_A , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _A )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _A )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _A )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _A )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _A )
copy_layers(teacher.decoder.block , student.decoder.block , _A )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
SCREAMING_SNAKE_CASE__ = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(_A )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
def solution(limit=1_000_000):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"{solution() = }")
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_SCREAMING_SNAKE_CASE : Optional[int] = False
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] , __lowerCamelCase : Union[str, Any]=32 ) -> Dict:
set_seed(0 )
        model = UNet2DModel(sample_size=__lowerCamelCase , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
@slow
def lowercase_ ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=__lowerCamelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__lowerCamelCase ) for _ in range(4 )]
SCREAMING_SNAKE_CASE__ = [torch.randn((4, 3, 32, 32) ).to(__lowerCamelCase ) for _ in range(4 )]
SCREAMING_SNAKE_CASE__ = [torch.randint(0 , 1000 , (4,) ).long().to(__lowerCamelCase ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_model_optimizer(resolution=32 )
model.train().to(__lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , timesteps[i] ).sample
SCREAMING_SNAKE_CASE__ = torch.nn.functional.mse_loss(__lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_model_optimizer(resolution=32 )
model.train().to(__lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , timesteps[i] ).sample
SCREAMING_SNAKE_CASE__ = torch.nn.functional.mse_loss(__lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
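# Standalone sketch (added; not from the original file) of the property this test
# relies on: with identical beta schedules, DDPM and DDIM share the same
# forward-noising operator, so add_noise produces identical samples.
if __name__ == "__main__":
    ddpm = DDPMScheduler(num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' )
    ddim = DDIMScheduler(num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' )
    sample = torch.randn(1, 3, 8, 8 )
    eps = torch.randn(1, 3, 8, 8 )
    t = torch.tensor([42] )
    assert torch.allclose(ddpm.add_noise(sample , eps , t ) , ddim.add_noise(sample , eps , t ) , atol=1e-5 )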
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
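# Worked example (added for illustration): a 4x4 input with a 2x2 window and stride 2;
# avgpooling truncates each window mean to int, so 3.5 -> 3, 5.5 -> 5, and so on.
if __name__ == "__main__":
    _demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert maxpooling(_demo, size=2, stride=2).tolist() == [[6, 8], [14, 16]]
    assert avgpooling(_demo, size=2, stride=2).tolist() == [[3, 5], [11, 13]]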
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_clip'''] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_clip'''] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
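# Usage sketch (added; standard transformers top-level API backed by this lazy
# module -- attributes resolve to the submodules in _import_structure on first access):
if __name__ == "__main__":
    from transformers import CLIPModel, CLIPProcessor

    model = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''')
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')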
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
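    # Worked example (added; traced by hand): two ways to build "abc" from this bank.
    assert all_construct('''abc''', ['''a''', '''b''', '''c''', '''ab''']) == [['''ab''', '''c'''], ['''a''', '''b''', '''c''']]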
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
    a = IFImg2ImgSuperResolutionPipeline
a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
a = PipelineTesterMixin.required_optional_params - {"latents"}
def lowercase_ ( self : List[str] ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 ) -> Optional[Any]:
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self : Tuple ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase_ ( self : Optional[Any] ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowercase_ ( self : Optional[int] ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self : str ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self : Tuple ) -> List[str]:
self._test_save_load_local()
def lowercase_ ( self : Dict ) -> Optional[int]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache(Generic[T] ):
    """simple docstring"""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self : Union[str, Any] , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self : Any , x : T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self : Tuple ) -> None:
        for k in self.dq_store:
            print(k )

    def __repr__( self : Any ) -> str:
        return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = (UnCLIPScheduler,)
def lowercase_ ( self : List[str] , **__lowerCamelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCamelCase )
return config
def lowercase_ ( self : Dict ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowercase_ ( self : str ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0010011 < 1e-5
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE__ = scheduler.timesteps
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = pred_prev_sample
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def lowercase_ ( self : int ) -> Tuple:
pass
def lowercase_ ( self : Dict ) -> Union[str, Any]:
pass
def solution(n=4_000_000):
    '''simple docstring'''
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"{solution() = }")
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mock_training_loop_function('''hello''' )
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__lowerCamelCase : Optional[Any] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
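# Usage sketch (added for illustration; mirrors the decorator's intended training-loop
# use): the decorated function receives the current batch size as its first argument
# and is retried with half the size whenever it raises a CUDA out-of-memory error.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 16:  # stand-in for a real CUDA OOM at large batch sizes
            raise_fake_out_of_memory()
        return batch_size

    print(train())  # settles on 16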
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def UpperCAmelCase_ ( _A , _A , _A=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ = ''''''
else:
SCREAMING_SNAKE_CASE__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-config.hidden_size :]
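# A minimal standalone sketch of the split performed above: timm fuses the
# query/key/value projections into one (3 * hidden_size, hidden_size) matrix,
# and the slices peel off q, k and v in that order. The hidden size of 8 is an
# arbitrary illustrative value, not a real model dimension.
def _demo_qkv_split(hidden_size=8):
    import torch

    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    in_proj_bias = torch.randn(3 * hidden_size)
    query_w = in_proj_weight[:hidden_size, :]
    key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
    value_w = in_proj_weight[-hidden_size:, :]
    query_b = in_proj_bias[:hidden_size]
    key_b = in_proj_bias[hidden_size : hidden_size * 2]
    value_b = in_proj_bias[-hidden_size:]
    # Concatenating the slices back recovers the fused tensors exactly.
    assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)
    assert torch.equal(torch.cat([query_b, key_b, value_b]), in_proj_bias)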
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_A , _A )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = dct.pop(_A )
SCREAMING_SNAKE_CASE__ = val
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( _A , _A , _A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_A , )
SCREAMING_SNAKE_CASE__ = ViTHybridConfig(backbone_config=_A , image_size=3_84 , num_labels=10_00 )
SCREAMING_SNAKE_CASE__ = False
# load original model from timm
SCREAMING_SNAKE_CASE__ = timm.create_model(_A , pretrained=_A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ = timm_model.state_dict()
if base_model:
remove_classification_head_(_A )
SCREAMING_SNAKE_CASE__ = create_rename_keys(_A , _A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(_A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE__ = ViTHybridModel(_A ).eval()
else:
SCREAMING_SNAKE_CASE__ = ViTHybridForImageClassification(_A ).eval()
model.load_state_dict(_A )
# create image processor
SCREAMING_SNAKE_CASE__ = create_transform(**resolve_data_config({} , model=_A ) )
SCREAMING_SNAKE_CASE__ = transform.transforms
SCREAMING_SNAKE_CASE__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE__ = ViTHybridImageProcessor(
do_resize=_A , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_A , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = transform(_A ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = processor(_A , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_A , _A )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
SCREAMING_SNAKE_CASE__ = timm_model.forward_features(_A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_A , outputs.pooler_output , atol=1e-3 )
else:
SCREAMING_SNAKE_CASE__ = timm_model(_A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_A , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_A )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
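# Example invocation (a sketch: the model name below is the script's default and
# the output directory is a hypothetical path; adjust the script filename to
# wherever this file is saved):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 \
#       --push_to_hub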
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
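# A minimal usage sketch of the pattern exercised above: an AlignProcessor wraps
# a tokenizer and an image processor behind a single __call__. The checkpoint
# name and inputs are illustrative assumptions, not taken from these tests.
def _example_align_processor_usage():
    import numpy as np
    from transformers import AlignProcessor

    processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
    image = np.zeros((224, 224, 3), dtype=np.uint8)
    inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
    # input_ids/attention_mask come from the tokenizer, pixel_values from the
    # image processor, matching the keys asserted in the tests above.
    return inputs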
| 314
| 1
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_SCREAMING_SNAKE_CASE : List[Any] = '''hf-internal-testing/tiny-random-bert'''
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
_SCREAMING_SNAKE_CASE : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , __lowerCamelCase )
        # Should have downloaded the file into the cache directory
self.assertTrue(os.path.isdir(__lowerCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCamelCase , __lowerCamelCase ) ) )
with open(os.path.join(__lowerCamelCase , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
self.assertEqual(__lowerCamelCase , os.path.join(__lowerCamelCase , '''snapshots''' , __lowerCamelCase , __lowerCamelCase ) )
self.assertTrue(os.path.isfile(__lowerCamelCase ) )
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , __lowerCamelCase , revision='''9b8c223''' )
self.assertEqual(__lowerCamelCase , os.path.join(__lowerCamelCase , '''snapshots''' , __lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : Tuple ) -> Optional[Any]:
with self.assertRaisesRegex(__lowerCamelCase , '''is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = cached_file('''tiny-random-bert''' , __lowerCamelCase )
with self.assertRaisesRegex(__lowerCamelCase , '''is not a valid git identifier''' ):
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , __lowerCamelCase , revision='''aaaa''' )
with self.assertRaisesRegex(__lowerCamelCase , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , '''conf''' )
def lowercase_ ( self : Union[str, Any] ) -> Any:
with self.assertRaisesRegex(__lowerCamelCase , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , '''conf''' )
with open(os.path.join(__lowerCamelCase , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''.no_exist''' , __lowerCamelCase , '''conf''' ) ) )
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , '''conf''' , local_files_only=__lowerCamelCase , _raise_exceptions_for_missing_entries=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowerCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ = cached_file(__lowerCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
        # This checks that we actually called the mocked head request
mock_head.assert_called()
def lowercase_ ( self : Any ) -> Union[str, Any]:
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCamelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCamelCase ) )
def lowercase_ ( self : Any ) -> Dict:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowerCamelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __lowerCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowerCamelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __lowerCamelCase , revision='''ahaha''' )
SCREAMING_SNAKE_CASE__ = get_file_from_repo('''bert-base-cased''' , __lowerCamelCase )
        # The returned name is a cache path that is hard to assert on directly, so we check the file's content instead.
SCREAMING_SNAKE_CASE__ = json.loads(open(__lowerCamelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def lowercase_ ( self : int ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = Path(__lowerCamelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__lowerCamelCase , '''a.txt''' ) , str(__lowerCamelCase ) )
self.assertIsNone(get_file_from_repo(__lowerCamelCase , '''b.txt''' ) )
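# A short sketch of the two APIs under test (the repo id is the same tiny test
# fixture used above):
def _example_hub_file_helpers():
    from transformers.utils import cached_file, get_file_from_repo

    # Downloads the file into the local HF cache (or reuses it) and returns its path.
    config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
    # Returns None instead of raising when the file is absent from the repo.
    missing = get_file_from_repo("hf-internal-testing/tiny-random-bert", "does_not_exist.txt")
    return config_path, missing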
| 314
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
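# Intended behavior, hand-worked (the mechanically mangled names above keep this
# from being a runnable doctest):
#   input:  'hello world from python'
#   split:  ['hello', 'world', 'from', 'python']
#   [::-1]: ['python', 'from', 'world', 'hello']
#   join:   'python from world hello'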
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 1
|
from __future__ import annotations
from statistics import mean
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(_A ):
SCREAMING_SNAKE_CASE__ = burst_time[i]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
    # While not all processes are completed:
    # a process whose arrival time has passed and which still has remaining
    # execution time is put into ready_process, and the shortest job in
    # ready_process (the target_process) is then run to completion.
while completed != no_of_processes:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = -1
for i in range(_A ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
SCREAMING_SNAKE_CASE__ = i
total_time += burst_time[target_process]
completed += 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * no_of_processes
for i in range(_A ):
SCREAMING_SNAKE_CASE__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Union[str, Any] = [2, 5, 3, 7]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 0, 0, 0]
_SCREAMING_SNAKE_CASE : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_SCREAMING_SNAKE_CASE : List[str] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
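# A second, hand-traced case with staggered arrivals (computed by following the
# loop above rather than by running the mangled code):
#   arrival_time = [0, 1], burst_time = [3, 1]
#   t=0: only process 0 has arrived -> it runs to completion, t=3, waiting[0] = 3 - 0 - 3 = 0
#   t=3: process 1 is ready         -> it runs to completion, t=4, waiting[1] = 4 - 1 - 1 = 2
#   waiting times [0, 2], turnaround times [3, 3]. Note the scheduler is
#   non-preemptive: once selected, a job runs for its entire burst.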
| 314
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_A , '''Please write your own downloading logic.''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A , _A ) # noqa: S310
with gfile.GFile(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.size()
print('''Successfully downloaded''' , _A , _A , '''bytes.''' )
return filepath
@deprecated(
_A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCAmelCase_ ( _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=50_00 , _A=None , _A=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_A , one_hot=_A , dtype=_A , seed=_A )
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
SCREAMING_SNAKE_CASE__ = fake()
return _Datasets(train=_A , validation=_A , test=_A )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE__ = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE__ = '''train-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''train-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-images-idx3-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = '''t10k-labels-idx1-ubyte.gz'''
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + train_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_images_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_images(_A )
SCREAMING_SNAKE_CASE__ = _maybe_download(
_A , _A , source_url + test_labels_file )
with gfile.Open(_A , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ = _extract_labels(_A , one_hot=_A )
if not 0 <= validation_size <= len(_A ):
SCREAMING_SNAKE_CASE__ = (
'''Validation size should be between 0 and '''
F'''{len(_A )}. Received: {validation_size}.'''
)
raise ValueError(_A )
SCREAMING_SNAKE_CASE__ = train_images[:validation_size]
SCREAMING_SNAKE_CASE__ = train_labels[:validation_size]
SCREAMING_SNAKE_CASE__ = train_images[validation_size:]
SCREAMING_SNAKE_CASE__ = train_labels[validation_size:]
SCREAMING_SNAKE_CASE__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
SCREAMING_SNAKE_CASE__ = _DataSet(_A , _A , **_A )
return _Datasets(train=_A , validation=_A , test=_A )
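# A minimal usage sketch of the loader above (the directory is a hypothetical
# path; `read_data_sets` and `next_batch` are the conventional names of the
# mangled function and method defined above):
def _example_read_data_sets(train_dir="/tmp/mnist_data"):
    # Downloads the four gzip files if needed, splits off a validation set, and
    # returns a namedtuple of train/validation/test _DataSet objects.
    data = read_data_sets(train_dir, one_hot=True, validation_size=5000)  # noqa: F821
    images, labels = data.train.next_batch(32)
    return images.shape, labels.shape  # (32, 784) and (32, 10)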
| 314
| 1
|
from __future__ import annotations
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [True] * limit
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
SCREAMING_SNAKE_CASE__ = i * 2
while index < limit:
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = index + i
SCREAMING_SNAKE_CASE__ = [2]
for i in range(3 , _A , 2 ):
if is_prime[i]:
primes.append(_A )
return primes
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = prime_sieve(_A )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for i in range(len(_A ) ):
for j in range(i + length , len(_A ) ):
SCREAMING_SNAKE_CASE__ = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
SCREAMING_SNAKE_CASE__ = j - i
SCREAMING_SNAKE_CASE__ = sol
return largest
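# Hand-worked example of the intended algorithm (Project Euler 50): below a
# ceiling of 100, the longest run of consecutive primes summing to a prime is
#   2 + 3 + 5 + 7 + 11 + 13 = 41,
# so the search above would return 41 (traced by hand; the mangled names keep
# the snippet from running as-is).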
if __name__ == "__main__":
print(F"{solution() = }")
| 314
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return max(metric_fn(_A , _A ) for gt in ground_truths )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
            '''RAG model type: rag_sequence, rag_token or bart; if none is specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
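# Example invocation (a sketch: the file paths are hypothetical, the checkpoint
# is a public RAG checkpoint, and the script filename is whatever this file is
# saved as):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5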
| 314
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "Speech2TextFeatureExtractor"
a = "Speech2TextTokenizer"
def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.feature_extractor
SCREAMING_SNAKE_CASE__ = False
def __call__( self : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''raw_speech''' )
else:
SCREAMING_SNAKE_CASE__ = kwargs.pop('''audio''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''sampling_rate''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''text''' , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE__ = args[0]
SCREAMING_SNAKE_CASE__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE__ = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ = encodings['''input_ids''']
return inputs
def lowercase_ ( self : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> Optional[int]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[Any] ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def lowercase_ ( self : str ) -> List[str]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''' )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ = self.feature_extractor
SCREAMING_SNAKE_CASE__ = False
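# A minimal usage sketch of the processor above (the checkpoint name and dummy
# audio are illustrative assumptions):
def _example_speech_to_text_processor():
    import numpy as np
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    # Passing `audio` and `text` together returns the feature-extractor inputs
    # with the tokenized text attached as labels, per __call__ above.
    return processor(audio=audio, sampling_rate=16000, text="a transcription")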
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
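    # Worked example of the shortest-edge resize traced above: with
    # size = {"shortest_edge": 18} and an input of width 30 and height 40,
    # w < h, so the width becomes 18 and the height int(18 * 40 / 30) = 24;
    # a square input maps to 18 x 18.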
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
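# Contextual sketch (not part of the test above): DETR-style image processors
# such as YolosImageProcessor return boxes as normalized
# (center_x, center_y, width, height), which is the format the expected box
# tensors in these assertions use. Converting back to absolute corner
# coordinates looks like this:
def unnormalize_box(box, image_width, image_height):
    center_x, center_y, width, height = box
    return (
        (center_x - width / 2) * image_width,    # x_min in pixels
        (center_y - height / 2) * image_height,  # y_min in pixels
        (center_x + width / 2) * image_width,    # x_max in pixels
        (center_y + height / 2) * image_height,  # y_max in pixels
    )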
| 314
| 1
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if n == 1 or not isinstance(_A , _A ):
return 0
elif n == 2:
return 1
else:
SCREAMING_SNAKE_CASE__ = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 2
while digits < n:
index += 1
SCREAMING_SNAKE_CASE__ = len(str(fibonacci(_A ) ) )
return index
def UpperCAmelCase_ ( _A = 10_00 ):
'''simple docstring'''
return fibonacci_digits_index(_A )
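# Illustrative alternative (not from the source): fibonacci_digits_index above
# rebuilds the whole sequence for every probed index, which is quadratic
# overall. A single incremental pass finds the same answer in linear time.
def first_fibonacci_index_with_digits(n_digits):
    fib_prev, fib_curr, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(fib_curr)) < n_digits:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index  # expected to agree with solution(n_digits) for n_digits >= 2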
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 314
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = ProphetNetTokenizer
a = False
def lowercase_ ( self : List[Any] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase_ ( self : Any , __lowerCamelCase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ = '''unwanted, running'''
return input_text, output_text
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = BasicTokenizer(do_lower_case=__lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowercase_ ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def lowercase_ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
SCREAMING_SNAKE_CASE__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE__ = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowercase_ ( self : List[Any] ) -> str:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowercase_ ( self : str ) -> int:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowercase_ ( self : str ) -> int:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
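# Minimal, self-contained sketch of the greedy longest-match-first rule that
# WordpieceTokenizer (exercised above) applies to a single word; the reference
# implementation lives in transformers.models.bert.tokenization_bert.
def wordpiece(word, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk_token]  # no prefix matched: the whole word becomes UNK
        pieces.append(current)
        start = end
    return pieces

# wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
# wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]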
| 314
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "lxmert"
a = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : List[str]=3_0522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : Any=400 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : List[Any]=9 , __lowerCamelCase : Any=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=6.67 , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=True , **__lowerCamelCase : Optional[Any] , ) -> Any:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = num_qa_labels
SCREAMING_SNAKE_CASE__ = num_object_labels
SCREAMING_SNAKE_CASE__ = num_attr_labels
SCREAMING_SNAKE_CASE__ = l_layers
SCREAMING_SNAKE_CASE__ = x_layers
SCREAMING_SNAKE_CASE__ = r_layers
SCREAMING_SNAKE_CASE__ = visual_feat_dim
SCREAMING_SNAKE_CASE__ = visual_pos_dim
SCREAMING_SNAKE_CASE__ = visual_loss_normalizer
SCREAMING_SNAKE_CASE__ = task_matched
SCREAMING_SNAKE_CASE__ = task_mask_lm
SCREAMING_SNAKE_CASE__ = task_obj_predict
SCREAMING_SNAKE_CASE__ = task_qa
SCREAMING_SNAKE_CASE__ = visual_obj_loss
SCREAMING_SNAKE_CASE__ = visual_attr_loss
SCREAMING_SNAKE_CASE__ = visual_feat_loss
SCREAMING_SNAKE_CASE__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCamelCase )
| 314
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : Tuple , *__lowerCamelCase : str , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
requires_backends(self , '''decord''' )
self.check_model_type(__lowerCamelCase )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None ) -> List[str]:
SCREAMING_SNAKE_CASE__ = {}
if frame_sampling_rate is not None:
SCREAMING_SNAKE_CASE__ = frame_sampling_rate
if num_frames is not None:
SCREAMING_SNAKE_CASE__ = num_frames
SCREAMING_SNAKE_CASE__ = {}
if top_k is not None:
SCREAMING_SNAKE_CASE__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , __lowerCamelCase : Union[str, List[str]] , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
return super().__call__(__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Tuple=1 ) -> int:
if num_frames is None:
SCREAMING_SNAKE_CASE__ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
SCREAMING_SNAKE_CASE__ = BytesIO(requests.get(__lowerCamelCase ).content )
SCREAMING_SNAKE_CASE__ = VideoReader(__lowerCamelCase )
videoreader.seek(0 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = num_frames * frame_sampling_rate - 1
SCREAMING_SNAKE_CASE__ = np.linspace(__lowerCamelCase , __lowerCamelCase , num=__lowerCamelCase , dtype=np.intaa )
SCREAMING_SNAKE_CASE__ = videoreader.get_batch(__lowerCamelCase ).asnumpy()
SCREAMING_SNAKE_CASE__ = list(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processor(__lowerCamelCase , return_tensors=self.framework )
return model_inputs
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.model(**__lowerCamelCase )
return model_outputs
def lowercase_ ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=5 ) -> Dict:
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE__ = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ = model_outputs.logits.softmax(-1 )[0]
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = probs.topk(__lowerCamelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
SCREAMING_SNAKE_CASE__ = scores.tolist()
SCREAMING_SNAKE_CASE__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCamelCase , __lowerCamelCase )]
| 314
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(_A )
return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
def lowercase_ ( self : int , __lowerCamelCase : Tuple ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(__lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
SCREAMING_SNAKE_CASE__ = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE__ = lineTmp.strip()
SCREAMING_SNAKE_CASE__ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
SCREAMING_SNAKE_CASE__ = line[:idx]
SCREAMING_SNAKE_CASE__ = len(self.encoder )
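# Minimal, self-contained sketch of the merge loop inside bpe() above:
# repeatedly fuse the adjacent symbol pair with the lowest merge rank until no
# ranked pair remains (the ranks come from the bpe.codes merges file).
def bpe_merge(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break  # no learned merge applies any more
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

# bpe_merge(["l", "o", "w", "</w>"], {("l", "o"): 0, ("lo", "w"): 1})
# -> ("low", "</w>")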
| 314
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
SCREAMING_SNAKE_CASE__ = 1_92
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [8_00, 13_33]
SCREAMING_SNAKE_CASE__ = False
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE__ = 3_30
SCREAMING_SNAKE_CASE__ = 14
SCREAMING_SNAKE_CASE__ = 6
SCREAMING_SNAKE_CASE__ = 13_20
elif "yolos_s" in yolos_name:
SCREAMING_SNAKE_CASE__ = 3_84
SCREAMING_SNAKE_CASE__ = 15_36
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 6
elif "yolos_b" in yolos_name:
SCREAMING_SNAKE_CASE__ = [8_00, 13_44]
SCREAMING_SNAKE_CASE__ = 91
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(_A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( _A , _A , _A = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-config.hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-config.hidden_size :]
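# Standalone illustration of the fused-QKV split performed above (shapes only,
# independent of any checkpoint): timm stores one (3 * hidden, hidden) matrix
# plus one (3 * hidden,) bias, which are cut into equal query/key/value thirds.
def split_qkv(in_proj_weight, in_proj_bias, hidden_size):
    query_w = in_proj_weight[:hidden_size, :]
    key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
    value_w = in_proj_weight[-hidden_size:, :]
    query_b = in_proj_bias[:hidden_size]
    key_b = in_proj_bias[hidden_size : hidden_size * 2]
    value_b = in_proj_bias[-hidden_size:]
    return (query_w, query_b), (key_w, key_b), (value_w, value_b)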
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "backbone" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(_A )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(key_split[2] )
SCREAMING_SNAKE_CASE__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( _A , _A , _A , _A = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_yolos_config(_A )
# load original state_dict
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location='''cpu''' )['''model''']
# load 🤗 model
SCREAMING_SNAKE_CASE__ = YolosForObjectDetection(_A )
model.eval()
SCREAMING_SNAKE_CASE__ = convert_state_dict(_A , _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by YolosImageProcessor
SCREAMING_SNAKE_CASE__ = 8_00 if yolos_name != '''yolos_ti''' else 5_12
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_detection''' , size=_A )
SCREAMING_SNAKE_CASE__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = model(**_A )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = outputs.logits, outputs.pred_boxes
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = None, None
if yolos_name == "yolos_ti":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1e-4 )
Path(_A ).mkdir(exist_ok=_A )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if push_to_hub:
SCREAMING_SNAKE_CASE__ = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
SCREAMING_SNAKE_CASE__ = model_mapping[yolos_name]
image_processor.push_to_hub(_A , organization='''hustvl''' )
model.push_to_hub(_A , organization='''hustvl''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
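# Hypothetical invocation (script name and file paths are placeholders, not
# from the source; the flags match the argparse definition above):
# python convert_yolos_to_pytorch.py \
#     --yolos_name yolos_s_200_pre \
#     --checkpoint_path /path/to/yolos_s_200_pre.pth \
#     --pytorch_dump_folder_path ./yolos-small \
#     --push_to_hub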
| 314
|
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase_ ( _A = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _A , _A : str(int(_A ) * int(_A ) ) , n[i : i + 13] ) )
for i in range(len(_A ) - 12 ) )
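# Illustrative alternative (not from the source, reusing the 1000-digit N
# defined above): an explicit sliding window makes the 13-digit product easy
# to follow without reduce.
def solution_window(digits=N, span=13):
    best = 0
    for i in range(len(digits) - span + 1):
        product = 1
        for ch in digits[i : i + span]:
            product *= int(ch)
        best = max(best, product)
    return best  # expected to match solution() above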
if __name__ == "__main__":
print(F"{solution() = }")
| 314
| 1
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TaConfig.from_json_file(_A )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration(_A )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_A , _A , _A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_A )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
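# Hypothetical invocation (paths below are placeholders, not from the source;
# the flags match the argparse definition above):
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path ./t5-converted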
| 314
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the loop
if isinstance(self.unet.config.sample_size , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , eta=__lowerCamelCase , use_clipped_model_output=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
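# Hypothetical usage sketch: the pipeline above mirrors diffusers' stock
# DDIMPipeline, so it is driven the same way (the model id below is just a
# commonly used example checkpoint, not taken from this file).
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]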
| 314
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_SCREAMING_SNAKE_CASE : str = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
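# Generic illustration of the idea behind _LazyModule (a simplified sketch,
# not the transformers implementation): defer the real import until the first
# attribute access so that importing the package stays cheap.
import importlib

class LazyModule:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

# lazy_json = LazyModule("json"); lazy_json.dumps({"a": 1})  # import happens here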
| 314
|
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "tapas"
def __init__( self : int , __lowerCamelCase : Optional[Any]=3_0522 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : int=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Union[str, Any]=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=1024 , __lowerCamelCase : Union[str, Any]=[3, 256, 256, 2, 256, 256, 10] , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[Any]=10.0 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : str=1.0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=1.0 , __lowerCamelCase : Dict=1.0 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , __lowerCamelCase : List[str]="ratio" , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[Any]=64 , __lowerCamelCase : Any=32 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str , ) -> str:
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_sizes
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE__ = positive_label_weight
SCREAMING_SNAKE_CASE__ = num_aggregation_labels
SCREAMING_SNAKE_CASE__ = aggregation_loss_weight
SCREAMING_SNAKE_CASE__ = use_answer_as_supervision
SCREAMING_SNAKE_CASE__ = answer_loss_importance
SCREAMING_SNAKE_CASE__ = use_normalized_answer_loss
SCREAMING_SNAKE_CASE__ = huber_loss_delta
SCREAMING_SNAKE_CASE__ = temperature
SCREAMING_SNAKE_CASE__ = aggregation_temperature
SCREAMING_SNAKE_CASE__ = use_gumbel_for_cells
SCREAMING_SNAKE_CASE__ = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE__ = average_approximation_function
SCREAMING_SNAKE_CASE__ = cell_selection_preference
SCREAMING_SNAKE_CASE__ = answer_loss_cutoff
SCREAMING_SNAKE_CASE__ = max_num_rows
SCREAMING_SNAKE_CASE__ = max_num_columns
SCREAMING_SNAKE_CASE__ = average_logits_per_cell
SCREAMING_SNAKE_CASE__ = select_one_column
SCREAMING_SNAKE_CASE__ = allow_empty_column_selection
SCREAMING_SNAKE_CASE__ = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE__ = reset_position_index_per_cell
SCREAMING_SNAKE_CASE__ = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE__ = aggregation_labels
SCREAMING_SNAKE_CASE__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = {int(__lowerCamelCase ): v for k, v in aggregation_labels.items()}
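# Standalone sketch of the key normalization done just above: aggregation
# labels round-tripped through JSON arrive with string keys, so they are cast
# back to int (the label names below are TAPAS' standard aggregation operators).
aggregation_labels = {"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"}
aggregation_labels = {int(key): value for key, value in aggregation_labels.items()}
assert aggregation_labels[1] == "SUM"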
| 314
| 1
|
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
_SCREAMING_SNAKE_CASE : Optional[Any] = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    _SCREAMING_SNAKE_CASE : List[str] = requests.get(url, headers={'''User-Agent''': UserAgent().random})
# res.raise_for_status()
    with open('''project1a.html''', '''wb''') as out_file:  # saved locally so the result's CSS class names can be inspected
for data in res.iter_content(10000):
out_file.write(data)
_SCREAMING_SNAKE_CASE : List[str] = BeautifulSoup(res.text, '''html.parser''')
_SCREAMING_SNAKE_CASE : str = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 314
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = 0
@slow
def lowercase_ ( self : List[str] ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCamelCase ) , 0 )
def lowercase_ ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
# Check that tokenizer_type ≠ model_type
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> int:
with pytest.raises(__lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Tuple:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : Any ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : List[str] ) -> Tuple:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : Optional[int] ) -> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : int ) -> str:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : List[Any] ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : List[str] ) -> str:
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = False
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NewTokenizer
a = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def lowercase_ ( self : Any ) -> Optional[Any]:
# Make sure we have cached the tokenizer.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
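# A minimal sketch of the registration API exercised by the tests above
# (illustrative, not part of the test file; `MyConfig` and `MyTokenizer` are
# hypothetical names):
#
# from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyTokenizer(PreTrainedTokenizer):
#     pass
#
# AutoConfig.register("my-model", MyConfig)
# AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# # AutoTokenizer.from_pretrained(...) will now map checkpoints whose config is a
# # MyConfig to MyTokenizer, exactly as the try/finally blocks above verify.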
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=sys.maxsize ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''bilinear'''
SCREAMING_SNAKE_CASE__ = max_size
SCREAMING_SNAKE_CASE__ = short_edge_length
def __call__( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = []
for img in imgs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = img.shape[:2]
# later: provide list and randomly choose index for resize
SCREAMING_SNAKE_CASE__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
SCREAMING_SNAKE_CASE__ = size * 1.0 / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = size, scale * w
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = scale * h, size
if max(__lowerCamelCase , __lowerCamelCase ) > self.max_size:
SCREAMING_SNAKE_CASE__ = self.max_size * 1.0 / max(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = newh * scale
SCREAMING_SNAKE_CASE__ = neww * scale
SCREAMING_SNAKE_CASE__ = int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ = int(newh + 0.5 )
if img.dtype == np.uint8:
SCREAMING_SNAKE_CASE__ = Image.fromarray(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
SCREAMING_SNAKE_CASE__ = np.asarray(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # HWC -> NCHW
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
__lowerCamelCase , (newh, neww) , mode=self.interp_method , align_corners=__lowerCamelCase ).squeeze(0 )
img_augs.append(__lowerCamelCase )
return img_augs
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
SCREAMING_SNAKE_CASE__ = cfg.INPUT.FORMAT
SCREAMING_SNAKE_CASE__ = cfg.SIZE_DIVISIBILITY
SCREAMING_SNAKE_CASE__ = cfg.PAD_VALUE
SCREAMING_SNAKE_CASE__ = cfg.INPUT.MAX_SIZE_TEST
SCREAMING_SNAKE_CASE__ = cfg.MODEL.DEVICE
SCREAMING_SNAKE_CASE__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE__ = lambda __lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tuple(max(__lowerCamelCase ) for s in zip(*[img.shape for img in images] ) )
SCREAMING_SNAKE_CASE__ = [im.shape[-2:] for im in images]
SCREAMING_SNAKE_CASE__ = [
nn.functional.pad(
__lowerCamelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(__lowerCamelCase , __lowerCamelCase )
]
return torch.stack(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
def __call__( self : Any , __lowerCamelCase : str , __lowerCamelCase : Tuple=False ) -> List[str]:
with torch.no_grad():
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = [images]
if single_image:
assert len(__lowerCamelCase ) == 1
for i in range(len(__lowerCamelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(__lowerCamelCase , images.pop(__lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
__lowerCamelCase , torch.as_tensor(img_tensorize(images.pop(__lowerCamelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
SCREAMING_SNAKE_CASE__ = torch.tensor([im.shape[:2] for im in images] )
SCREAMING_SNAKE_CASE__ = self.aug(__lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
SCREAMING_SNAKE_CASE__ = [self.normalizer(__lowerCamelCase ) for x in images]
# now pad them to do the following operations
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.pad(__lowerCamelCase )
# size-divisibility padding is not implemented
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) scale factors between the original and resized sizes
SCREAMING_SNAKE_CASE__ = torch.true_divide(__lowerCamelCase , __lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
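# A minimal usage sketch (illustrative; not part of the original file). `Preprocess`
# and `single_image` are the upstream names assumed for the class and flag above,
# and `cfg` stands for a detectron2-style config object with the INPUT/MODEL fields
# read in __init__:
#
# preprocess = Preprocess(cfg)
# images, sizes, scales_yx = preprocess("cat.jpg", single_image=True)
# # `images` is a normalized, padded NCHW tensor; `sizes` holds the resized HxW and
# # `scales_yx` the per-image factors used by the box helpers defined below.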
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
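# Worked example (added; not in the original): with scale_yx = [[0.5, 0.25]]
# (y-scale 0.5, x-scale 0.25), a box [x1, y1, x2, y2] = [100, 40, 200, 80]
# rescales to [25, 20, 50, 40]: x coordinates (columns 0::2) use scale_yx[:, 1],
# y coordinates (columns 1::2) use scale_yx[:, 0].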
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
assert torch.isfinite(_A ).all(), "Box tensor contains infinite or NaN!"
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = box_size
tensor[:, 0].clamp_(min=0 , max=_A )
tensor[:, 1].clamp_(min=0 , max=_A )
tensor[:, 2].clamp_(min=0 , max=_A )
tensor[:, 3].clamp_(min=0 , max=_A )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Dict , **__lowerCamelCase : Dict ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : str ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
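# A minimal usage sketch of the processor under test (illustrative; the checkpoint
# path is hypothetical):
#
# from PIL import Image
# from transformers import VisionTextDualEncoderProcessor
#
# processor = VisionTextDualEncoderProcessor.from_pretrained("path/to/checkpoint")
# inputs = processor(text="lower newer", images=Image.open("cat.jpg"), return_tensors="pt")
# # inputs combines the tokenizer outputs (input_ids, token_type_ids, attention_mask)
# # with the image processor's pixel_values, as the keys test above checks.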
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = ["pixel_values"]
def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : float = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : Optional[int] , ) -> None:
super().__init__(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 384}
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE__ = crop_pct if crop_pct is not None else 224 / 256
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : float , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE__ = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(__lowerCamelCase , size=__lowerCamelCase , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowerCamelCase , size=(shortest_edge, shortest_edge) , data_format=__lowerCamelCase , **__lowerCamelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowerCamelCase , size=(shortest_edge, shortest_edge) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[Any] , ) -> Any:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : int , ) -> np.ndarray:
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : float = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Any , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , crop_pct=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = {'''pixel_values''': images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
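# A minimal usage sketch (illustrative; the class name is assumed -- this mirrors
# the ConvNeXt-style shortest-edge / crop_pct resizing implemented above):
#
# import numpy as np
#
# image_processor = ImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
# batch = image_processor([np.zeros((480, 640, 3), dtype=np.uint8)], return_tensors="np")
# # For sizes < 384 the short edge is first resized to 224 / 0.875 = 256, then the
# # image is center-cropped, so batch["pixel_values"].shape == (1, 3, 224, 224).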
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a = "nezha"
def __init__( self : Optional[Any] , __lowerCamelCase : str=2_1128 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Optional[Any]=True , **__lowerCamelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = max_relative_position
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = use_cache
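# A minimal usage sketch (not in the original file; `NezhaConfig`/`NezhaModel` are
# the upstream transformers names for this configuration):
#
# from transformers import NezhaConfig, NezhaModel
#
# config = NezhaConfig(hidden_size=768, max_relative_position=64)
# model = NezhaModel(config)
# # Unlike BERT, Nezha encodes positions with functional relative embeddings,
# # hence the extra max_relative_position hyperparameter above.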
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
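# A minimal usage sketch (illustrative). Note that, as the text test above implies,
# the processor's tokenizer path defaults to fixed-length padding (max_length=64):
#
# from transformers import AlignProcessor
#
# processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
# inputs = processor(text="lower newer", images=image, return_tensors="pt")
# # inputs["input_ids"].shape[-1] == 64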
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = RoFormerTokenizer
def __init__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : str=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : int="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : Optional[int]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> Dict:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = pre_tok_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __getstate__( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return state
def __setstate__( self : int , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = d
SCREAMING_SNAKE_CASE__ = self.__dict__['''_tokenizer'''].get_vocab()
SCREAMING_SNAKE_CASE__ = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) )
def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , **__lowerCamelCase : Tuple , ) -> int:
SCREAMING_SNAKE_CASE__ = BertPreTokenizer()
return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
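# Note (added; not in the original file): the Jieba pre-tokenizer installed via
# PreTokenizer.custom(...) wraps a Python callable that the Rust `tokenizers`
# backend cannot serialize, which is why __getstate__ and save_pretrained above
# swap in a plain BertPreTokenizer before pickling or writing tokenizer.json,
# and why __setstate__ rebuilds the Jieba pre-tokenizer from the vocab. A sketch:
#
# from transformers import RoFormerTokenizerFast
#
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.save_pretrained("saved_roformer")                 # serialized with BertPreTokenizer
# reloaded = RoFormerTokenizerFast.from_pretrained("saved_roformer")  # Jieba restored on load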
import os
import numpy
import onnx
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = a.name
SCREAMING_SNAKE_CASE__ = b.name
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = a == b
SCREAMING_SNAKE_CASE__ = name_a
SCREAMING_SNAKE_CASE__ = name_b
return res
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_A , _A )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
_graph_replace_input_with(node_proto.attribute[1].g , _A , _A )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(_A , _A , _A )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = list(model.graph.initializer )
SCREAMING_SNAKE_CASE__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
SCREAMING_SNAKE_CASE__ = inits[i].name
SCREAMING_SNAKE_CASE__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# redirect all remaining references from the removed initializer to the kept one
_graph_replace_input_with(model_without_ext.graph , _A , _A )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = os.path.dirname(_A )
SCREAMING_SNAKE_CASE__ = os.path.basename(_A )
SCREAMING_SNAKE_CASE__ = onnx.load(os.path.join(_A , _A ) )
SCREAMING_SNAKE_CASE__ = list(model.graph.initializer )
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(len(_A ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_A ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_A )
dup_set.add(_A )
SCREAMING_SNAKE_CASE__ = inits[j].data_type
SCREAMING_SNAKE_CASE__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , _A )
total_reduced_size += mem_size
SCREAMING_SNAKE_CASE__ = inits[i].name
SCREAMING_SNAKE_CASE__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_A )
else:
SCREAMING_SNAKE_CASE__ = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 10_24 / 10_24 / 10_24 , '''GB''' )
SCREAMING_SNAKE_CASE__ = sorted(_A )
_remove_dup_initializers_from_model(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = '''optimized_''' + model_file_name
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
onnx.save(_A , _A )
return new_model
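# A minimal usage sketch (illustrative; the path is hypothetical and the entry
# point is referred to by its assumed upstream name). The function above loads the
# model, folds byte-identical initializers into one tensor, prints the memory
# saved, and writes "optimized_<name>" next to the input file:
#
# optimized_path = remove_dup_initializers("exported/encoder.onnx")
# print("deduplicated model written to", optimized_path)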
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
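# Note (added; not in the original file): with the defaults above the dimensions
# are self-consistent -- transition_dim (25) = observation_dim (17) + action_dim (6)
# + 1 reward + 1 value, i.e. one full (state, action, reward, value) transition
# per row of the block.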
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = to_pil_image(_A )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = pil_image.size
SCREAMING_SNAKE_CASE__ = pytesseract.image_to_data(_A , lang=_A , output_type='''dict''' , config=_A )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ = [idx for idx, word in enumerate(_A ) if not word.strip()]
SCREAMING_SNAKE_CASE__ = [word for idx, word in enumerate(_A ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ = []
for x, y, w, h in zip(_A , _A , _A , _A ):
SCREAMING_SNAKE_CASE__ = [x, y, x + w, y + h]
actual_boxes.append(_A )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_A , _A , _A ) )
assert len(_A ) == len(_A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = ["pixel_values"]
def __init__( self : Tuple , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : float = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "" , **__lowerCamelCase : List[Any] , ) -> None:
super().__init__(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_value
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE__ = apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config
def lowercase_ ( self : int , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ = (size['''height'''], size['''width'''])
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> np.ndarray:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, Iterable[float]] , __lowerCamelCase : Union[float, Iterable[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] , ) -> np.ndarray:
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def lowercase_ ( self : Tuple , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : Union[float, Iterable[float]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : int , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(__lowerCamelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for image in images:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = apply_tesseract(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
words_batch.append(__lowerCamelCase )
boxes_batch.append(__lowerCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowerCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE__ = words_batch
SCREAMING_SNAKE_CASE__ = boxes_batch
return data
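# A minimal usage sketch (illustrative; the class name is assumed -- this is the
# LayoutLM-style document interface implemented above, and requires pytesseract
# when apply_ocr is enabled):
#
# from PIL import Image
#
# image_processor = DocumentImageProcessor(apply_ocr=True)
# encoding = image_processor(Image.open("invoice.png"), return_tensors="pt")
# # encoding holds pixel_values plus the OCR'd "words" and their bounding "boxes",
# # normalized to the 0-1000 coordinate space by normalize_box above.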
def UpperCAmelCase_ ( _A = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
SCREAMING_SNAKE_CASE__ = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
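# Sanity check (added; not in the original): for limit = 8 the sum of
# phi(2) .. phi(8) is 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, matching the Project
# Euler 72 example of 21 reduced proper fractions with denominator <= 8.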
if __name__ == "__main__":
print(F"{solution() = }")